sidedata: introduce a new requirement to protect the feature...
marmoute - r43298:827cb4fe default
@@ -1,1524 +1,1528 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update the config items known to the ui with those from the extension"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrites config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

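# Extensions feed this function via a module-level 'configtable'; a minimal
# registration sketch on the extension side (names illustrative, following
# the standard registrar pattern):
#
#     from mercurial import registrar
#
#     configtable = {}
#     configitem = registrar.configitem(configtable)
#
#     configitem('myext', 'some-knob',
#         default=False,
#     )
#
# The extension loader then calls loadconfigtable(ui, 'myext', configtable).
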
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; the name is matched as a regular
              expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0, experimental=False):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        self._re = None
        if generic:
            self._re = re.compile(self.name)

class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # We use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expressions. Having the match
            # rooted at the start of the string produces less surprising
            # results for users writing simple regexes for sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor requiring
            # ".*" on some patterns over prefixing most patterns with "^",
            # since the "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

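# A lookup sketch for the wildcard behaviour above (illustrative doctest,
# not part of the original module):
#
#     >>> reg = itemregister()
#     >>> reg['mode'] = configitem('color', 'mode', default='auto')
#     >>> reg['.*'] = configitem('color', '.*', default=None, generic=True)
#     >>> reg.get('mode').default           # exact key wins
#     'auto'
#     >>> reg.get('branch').generic         # falls through to the wildcard
#     True
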
coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for cases where the default is derived from other values
dynamicdefault = object()
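
# 'dynamicdefault' is a sentinel: consumers compare against it with 'is' and
# compute the real default elsewhere, e.g. (sketch):
#
#     >>> item = configitem('pager', 'pager', default=dynamicdefault)
#     >>> item.default is dynamicdefault
#     True
#
# Plain defaults may also be callables (e.g. 'default=list') so that each
# lookup gets a fresh mutable value rather than a shared one.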

# Registering actual config items

def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f

coreconfigitem = getitemregister(coreitems)
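
# Once registered, these defaults flow through the ui.config* family whenever
# the user has not set a value; e.g. (sketch, assuming a 'ui' instance):
#
#     >>> ui.configint('cmdserver', 'max-log-files')
#     7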

def _registerdiffopts(section, configprefix=''):
    coreconfigitem(section, configprefix + 'nodates',
        default=False,
    )
    coreconfigitem(section, configprefix + 'showfunc',
        default=False,
    )
    coreconfigitem(section, configprefix + 'unified',
        default=None,
    )
    coreconfigitem(section, configprefix + 'git',
        default=False,
    )
    coreconfigitem(section, configprefix + 'ignorews',
        default=False,
    )
    coreconfigitem(section, configprefix + 'ignorewsamount',
        default=False,
    )
    coreconfigitem(section, configprefix + 'ignoreblanklines',
        default=False,
    )
    coreconfigitem(section, configprefix + 'ignorewseol',
        default=False,
    )
    coreconfigitem(section, configprefix + 'nobinary',
        default=False,
    )
    coreconfigitem(section, configprefix + 'noprefix',
        default=False,
    )
    coreconfigitem(section, configprefix + 'word-diff',
        default=False,
    )
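
# Each call above fans out to one item per diff flag; for instance,
# _registerdiffopts(section='annotate') registers 'annotate.nodates',
# 'annotate.showfunc', 'annotate.git', etc. with the defaults shown.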

coreconfigitem('alias', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('auth', 'cookiefile',
    default=None,
)
_registerdiffopts(section='annotate')
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
coreconfigitem('censor', 'policy',
    default='abort',
    experimental=True,
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('cmdserver', 'max-log-files',
    default=7,
)
coreconfigitem('cmdserver', 'max-log-size',
    default='1 MB',
)
coreconfigitem('cmdserver', 'max-repo-cache',
    default=0,
    experimental=True,
)
coreconfigitem('cmdserver', 'message-encodings',
    default=list,
    experimental=True,
)
coreconfigitem('cmdserver', 'track-log',
    default=lambda: ['chgserver', 'cmdserver', 'repocache'],
)
coreconfigitem('color', '.*',
    default=None,
    generic=True,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
_registerdiffopts(section='commands', configprefix='commit.interactive.')
coreconfigitem('commands', 'commit.post-status',
    default=False,
)
coreconfigitem('commands', 'grep.all-files',
    default=False,
    experimental=True,
)
coreconfigitem('commands', 'resolve.confirm',
    default=False,
)
coreconfigitem('commands', 'resolve.explicit-re-merge',
    default=False,
)
coreconfigitem('commands', 'resolve.mark-check',
    default='none',
)
_registerdiffopts(section='commands', configprefix='revert.interactive.')
coreconfigitem('commands', 'show.aliasprefix',
    default=list,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
    experimental=True,
)
coreconfigitem('commands', 'status.terse',
    default='',
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.check',
    default=None,
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('committemplate', '.*',
    default=None,
    generic=True,
)
coreconfigitem('convert', 'bzr.saverev',
    default=True,
)
coreconfigitem('convert', 'cvsps.cache',
    default=True,
)
coreconfigitem('convert', 'cvsps.fuzz',
    default=60,
)
coreconfigitem('convert', 'cvsps.logencoding',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergefrom',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergeto',
    default=None,
)
coreconfigitem('convert', 'git.committeractions',
    default=lambda: ['messagedifferent'],
)
coreconfigitem('convert', 'git.extrakeys',
    default=list,
)
coreconfigitem('convert', 'git.findcopiesharder',
    default=False,
)
coreconfigitem('convert', 'git.remoteprefix',
    default='remote',
)
coreconfigitem('convert', 'git.renamelimit',
    default=400,
)
coreconfigitem('convert', 'git.saverev',
    default=True,
)
coreconfigitem('convert', 'git.similarity',
    default=50,
)
coreconfigitem('convert', 'git.skipsubmodules',
    default=False,
)
coreconfigitem('convert', 'hg.clonebranches',
    default=False,
)
coreconfigitem('convert', 'hg.ignoreerrors',
    default=False,
)
coreconfigitem('convert', 'hg.preserve-hash',
    default=False,
)
coreconfigitem('convert', 'hg.revs',
    default=None,
)
coreconfigitem('convert', 'hg.saverev',
    default=False,
)
coreconfigitem('convert', 'hg.sourcename',
    default=None,
)
coreconfigitem('convert', 'hg.startrev',
    default=None,
)
coreconfigitem('convert', 'hg.tagsbranch',
    default='default',
)
coreconfigitem('convert', 'hg.usebranchnames',
    default=True,
)
coreconfigitem('convert', 'ignoreancestorcheck',
    default=False,
    experimental=True,
)
coreconfigitem('convert', 'localtimezone',
    default=False,
)
coreconfigitem('convert', 'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem('convert', 'p4.startrev',
    default=0,
)
coreconfigitem('convert', 'skiptags',
    default=False,
)
coreconfigitem('convert', 'svn.debugsvnlog',
    default=True,
)
coreconfigitem('convert', 'svn.trunk',
    default=None,
)
coreconfigitem('convert', 'svn.tags',
    default=None,
)
coreconfigitem('convert', 'svn.branches',
    default=None,
)
coreconfigitem('convert', 'svn.startrev',
    default=0,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('defaults', '.*',
    default=None,
    generic=True,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'bundle.delta',
    default='',
)
coreconfigitem('devel', 'cache-vfs',
    default=None,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'warn-empty-changegroup',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('devel', 'warn-config',
    default=None,
)
coreconfigitem('devel', 'warn-config-default',
    default=None,
)
coreconfigitem('devel', 'user.obsmarker',
    default=None,
)
coreconfigitem('devel', 'warn-config-unknown',
    default=None,
)
coreconfigitem('devel', 'debug.copies',
    default=False,
)
coreconfigitem('devel', 'debug.extensions',
    default=False,
)
coreconfigitem('devel', 'debug.peer-request',
    default=False,
)
coreconfigitem('devel', 'discovery.randomize',
    default=True,
)
_registerdiffopts(section='diff')
coreconfigitem('email', 'bcc',
    default=None,
)
coreconfigitem('email', 'cc',
    default=None,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('email', 'reply-to',
    default=None,
)
coreconfigitem('email', 'to',
    default=None,
)
coreconfigitem('experimental', 'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'auto-publish',
    default='publish',
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.bzip2',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.gzip',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.none',
    default=None,
)
coreconfigitem('experimental', 'bundlecomplevel.zstd',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'cleanup-as-archived',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'copies.read-from',
    default="filelog-only",
)
coreconfigitem('experimental', 'copies.write-to',
    default='filelog-only',
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'directaccess',
    default=False,
)
coreconfigitem('experimental', 'directaccess.revnums',
    default=False,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'evolution',
    default=list,
)
coreconfigitem('experimental', 'evolution.allowdivergence',
    default=False,
    alias=[('experimental', 'allowdivergence')]
)
coreconfigitem('experimental', 'evolution.allowunstable',
    default=None,
)
coreconfigitem('experimental', 'evolution.createmarkers',
    default=None,
)
coreconfigitem('experimental', 'evolution.effect-flags',
    default=True,
    alias=[('experimental', 'effect-flags')]
)
coreconfigitem('experimental', 'evolution.exchange',
    default=None,
)
coreconfigitem('experimental', 'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem('experimental', 'log.topo',
    default=False,
)
coreconfigitem('experimental', 'evolution.report-instabilities',
    default=True,
)
coreconfigitem('experimental', 'evolution.track-operation',
    default=True,
)
# repo-level config to exclude a revset from visibility
#
# The target use case is to use `share` to expose different subsets of the
# same repository, especially server side. See also `server.view`.
coreconfigitem('experimental', 'extra-filter-revs',
    default=None,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
coreconfigitem('experimental', 'mergetempdirprefix',
    default=None,
)
coreconfigitem('experimental', 'mmapindexthreshold',
    default=None,
)
coreconfigitem('experimental', 'narrow',
    default=False,
)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'httppeer.v2-encoder-order',
    default=None,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'nointerrupt', default=False)
coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)

coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'remotenames',
    default=False,
)
coreconfigitem('experimental', 'removeemptydirs',
    default=True,
)
coreconfigitem('experimental', 'revert.interactive.select-to-keep',
    default=False,
)
coreconfigitem('experimental', 'revisions.prefixhexnode',
    default=False,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'revisions.disambiguatewithin',
    default=None,
)
coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
    default=50000,
)
coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
    default=100000,
)
coreconfigitem('experimental', 'server.stream-narrow-clones',
    default=False,
)
coreconfigitem('experimental', 'single-head-per-branch',
    default=False,
)
coreconfigitem('experimental', 'single-head-per-branch:account-closed-heads',
    default=False,
)
coreconfigitem('experimental', 'sshserver.support-v2',
    default=False,
)
coreconfigitem('experimental', 'sparse-read',
    default=False,
)
coreconfigitem('experimental', 'sparse-read.density-threshold',
    default=0.50,
)
coreconfigitem('experimental', 'sparse-read.min-gap-size',
    default='65K',
)
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'update.atomic-file',
    default=False,
)
coreconfigitem('experimental', 'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem('experimental', 'web.apiserver',
    default=False,
)
coreconfigitem('experimental', 'web.api.http-v2',
    default=False,
)
coreconfigitem('experimental', 'web.api.debugreflect',
    default=False,
)
coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
    default=False,
)
coreconfigitem('experimental', 'xdiff',
    default=False,
)
coreconfigitem('extensions', '.*',
    default=None,
    generic=True,
)
coreconfigitem('extdata', '.*',
    default=None,
    generic=True,
)
coreconfigitem('format', 'bookmarks-in-store',
    default=False,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
    experimental=True,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
    experimental=True,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
    experimental=True,
)
coreconfigitem('format', 'maxchainlen',
    default=dynamicdefault,
    experimental=True,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'sparse-revlog',
    default=True,
)
coreconfigitem('format', 'revlog-compression',
    default='zlib',
    alias=[('experimental', 'format.compression')]
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
+coreconfigitem('format', 'use-side-data',
+    default=False,
+    experimental=True,
+)
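# 'format.use-side-data' is the point of this changeset: when set, repository
# creation adds a new requirement so that clients unaware of sidedata cannot
# touch such repos. A rough sketch of the gating logic (constant name and
# hook point assumed; see localrepo.py in this changeset):
#
#     if ui.configbool('format', 'use-side-data'):
#         requirements.add(SIDEDATA_REQUIREMENT)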
coreconfigitem('format', 'internal-phase',
    default=False,
    experimental=True,
)
coreconfigitem('fsmonitor', 'warn_when_unused',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_update_file_count',
    default=50000,
)
coreconfigitem('help', br'hidden-command\..*',
    default=False,
    generic=True,
)
coreconfigitem('help', br'hidden-topic\..*',
    default=False,
    generic=True,
)
coreconfigitem('hooks', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hgweb-paths', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostfingerprints', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('hostsecurity', 'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem('hostsecurity', '.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:verifycertsfile$',
    default=None,
    generic=True,
)

coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)

coreconfigitem('http', 'timeout',
    default=None,
)

coreconfigitem('logtoprocess', 'commandexception',
    default=None,
)
coreconfigitem('logtoprocess', 'commandfinish',
    default=None,
)
coreconfigitem('logtoprocess', 'command',
    default=None,
)
coreconfigitem('logtoprocess', 'develwarn',
    default=None,
)
coreconfigitem('logtoprocess', 'uiblocked',
    default=None,
)
coreconfigitem('merge', 'checkunknown',
    default='abort',
)
coreconfigitem('merge', 'checkignored',
    default='abort',
)
coreconfigitem('experimental', 'merge.checkpathconflicts',
    default=False,
)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'on-failure',
    default='continue',
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
    experimental=True,
)
coreconfigitem('merge', 'strict-capability-check',
    default=False,
)
coreconfigitem('merge-tools', '.*',
    default=None,
    generic=True,
)
coreconfigitem('merge-tools', br'.*\.args$',
    default="$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkers$',
    default='basic',
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('pager', 'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('pager', 'pager',
    default=dynamicdefault,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('paths', '.*',
    default=None,
    generic=True,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default='draft',
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'showtime',
    default=True,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'time-track',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'format',
    default=lambda: ['topic', 'bar', 'number', 'estimate'],
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
coreconfigitem('rewrite', 'backup-bundle',
    default=True,
    alias=[('ui', 'history-editing-backup')],
)
coreconfigitem('rewrite', 'update-timestamp',
    default=False,
)
coreconfigitem('storage', 'new-repo-backend',
    default='revlogv1',
    experimental=True,
)
coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
    default=True,
    alias=[('format', 'aggressivemergedeltas')],
)
coreconfigitem('storage', 'revlog.reuse-external-delta',
    default=True,
)
coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
    default=None,
)
coreconfigitem('storage', 'revlog.zlib.level',
    default=None,
)
coreconfigitem('storage', 'revlog.zstd.level',
    default=None,
)
coreconfigitem('server', 'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'bundle1.pull',
    default=None,
)
coreconfigitem('server', 'bundle1gd.pull',
    default=None,
)
coreconfigitem('server', 'bundle1.push',
    default=None,
)
coreconfigitem('server', 'bundle1gd.push',
    default=None,
)
coreconfigitem('server', 'bundle2.stream',
    default=True,
    alias=[('experimental', 'bundle2.stream')]
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'pullbundle',
    default=False,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'streamunbundle',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'view',
    default='served',
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('server', 'zstdlevel',
    default=3,
)
coreconfigitem('share', 'pool',
    default=None,
)
coreconfigitem('share', 'poolnaming',
    default='identity',
)
coreconfigitem('shelve', 'maxbackups',
    default=10,
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
    experimental=True,
)
coreconfigitem('subrepos', 'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem('subrepos', 'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'git:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'svn:allowed',
1152 default=dynamicdefault,
1156 default=dynamicdefault,
1153 )
1157 )
1154 coreconfigitem('templates', '.*',
1158 coreconfigitem('templates', '.*',
1155 default=None,
1159 default=None,
1156 generic=True,
1160 generic=True,
1157 )
1161 )
1158 coreconfigitem('templateconfig', '.*',
1162 coreconfigitem('templateconfig', '.*',
1159 default=dynamicdefault,
1163 default=dynamicdefault,
1160 generic=True,
1164 generic=True,
1161 )
1165 )
1162 coreconfigitem('trusted', 'groups',
1166 coreconfigitem('trusted', 'groups',
1163 default=list,
1167 default=list,
1164 )
1168 )
1165 coreconfigitem('trusted', 'users',
1169 coreconfigitem('trusted', 'users',
1166 default=list,
1170 default=list,
1167 )
1171 )
1168 coreconfigitem('ui', '_usedassubrepo',
1172 coreconfigitem('ui', '_usedassubrepo',
1169 default=False,
1173 default=False,
1170 )
1174 )
1171 coreconfigitem('ui', 'allowemptycommit',
1175 coreconfigitem('ui', 'allowemptycommit',
1172 default=False,
1176 default=False,
1173 )
1177 )
1174 coreconfigitem('ui', 'archivemeta',
1178 coreconfigitem('ui', 'archivemeta',
1175 default=True,
1179 default=True,
1176 )
1180 )
1177 coreconfigitem('ui', 'askusername',
1181 coreconfigitem('ui', 'askusername',
1178 default=False,
1182 default=False,
1179 )
1183 )
1180 coreconfigitem('ui', 'clonebundlefallback',
1184 coreconfigitem('ui', 'clonebundlefallback',
1181 default=False,
1185 default=False,
1182 )
1186 )
1183 coreconfigitem('ui', 'clonebundleprefers',
1187 coreconfigitem('ui', 'clonebundleprefers',
1184 default=list,
1188 default=list,
1185 )
1189 )
1186 coreconfigitem('ui', 'clonebundles',
1190 coreconfigitem('ui', 'clonebundles',
1187 default=True,
1191 default=True,
1188 )
1192 )
1189 coreconfigitem('ui', 'color',
1193 coreconfigitem('ui', 'color',
1190 default='auto',
1194 default='auto',
1191 )
1195 )
1192 coreconfigitem('ui', 'commitsubrepos',
1196 coreconfigitem('ui', 'commitsubrepos',
1193 default=False,
1197 default=False,
1194 )
1198 )
1195 coreconfigitem('ui', 'debug',
1199 coreconfigitem('ui', 'debug',
1196 default=False,
1200 default=False,
1197 )
1201 )
1198 coreconfigitem('ui', 'debugger',
1202 coreconfigitem('ui', 'debugger',
1199 default=None,
1203 default=None,
1200 )
1204 )
1201 coreconfigitem('ui', 'editor',
1205 coreconfigitem('ui', 'editor',
1202 default=dynamicdefault,
1206 default=dynamicdefault,
1203 )
1207 )
1204 coreconfigitem('ui', 'fallbackencoding',
1208 coreconfigitem('ui', 'fallbackencoding',
1205 default=None,
1209 default=None,
1206 )
1210 )
1207 coreconfigitem('ui', 'forcecwd',
1211 coreconfigitem('ui', 'forcecwd',
1208 default=None,
1212 default=None,
1209 )
1213 )
1210 coreconfigitem('ui', 'forcemerge',
1214 coreconfigitem('ui', 'forcemerge',
1211 default=None,
1215 default=None,
1212 )
1216 )
1213 coreconfigitem('ui', 'formatdebug',
1217 coreconfigitem('ui', 'formatdebug',
1214 default=False,
1218 default=False,
1215 )
1219 )
1216 coreconfigitem('ui', 'formatjson',
1220 coreconfigitem('ui', 'formatjson',
1217 default=False,
1221 default=False,
1218 )
1222 )
1219 coreconfigitem('ui', 'formatted',
1223 coreconfigitem('ui', 'formatted',
1220 default=None,
1224 default=None,
1221 )
1225 )
1222 coreconfigitem('ui', 'graphnodetemplate',
1226 coreconfigitem('ui', 'graphnodetemplate',
1223 default=None,
1227 default=None,
1224 )
1228 )
1225 coreconfigitem('ui', 'interactive',
1229 coreconfigitem('ui', 'interactive',
1226 default=None,
1230 default=None,
1227 )
1231 )
1228 coreconfigitem('ui', 'interface',
1232 coreconfigitem('ui', 'interface',
1229 default=None,
1233 default=None,
1230 )
1234 )
1231 coreconfigitem('ui', 'interface.chunkselector',
1235 coreconfigitem('ui', 'interface.chunkselector',
1232 default=None,
1236 default=None,
1233 )
1237 )
1234 coreconfigitem('ui', 'large-file-limit',
1238 coreconfigitem('ui', 'large-file-limit',
1235 default=10000000,
1239 default=10000000,
1236 )
1240 )
1237 coreconfigitem('ui', 'logblockedtimes',
1241 coreconfigitem('ui', 'logblockedtimes',
1238 default=False,
1242 default=False,
1239 )
1243 )
1240 coreconfigitem('ui', 'logtemplate',
1244 coreconfigitem('ui', 'logtemplate',
1241 default=None,
1245 default=None,
1242 )
1246 )
1243 coreconfigitem('ui', 'merge',
1247 coreconfigitem('ui', 'merge',
1244 default=None,
1248 default=None,
1245 )
1249 )
1246 coreconfigitem('ui', 'mergemarkers',
1250 coreconfigitem('ui', 'mergemarkers',
1247 default='basic',
1251 default='basic',
1248 )
1252 )
1249 coreconfigitem('ui', 'mergemarkertemplate',
1253 coreconfigitem('ui', 'mergemarkertemplate',
1250 default=('{node|short} '
1254 default=('{node|short} '
1251 '{ifeq(tags, "tip", "", '
1255 '{ifeq(tags, "tip", "", '
1252 'ifeq(tags, "", "", "{tags} "))}'
1256 'ifeq(tags, "", "", "{tags} "))}'
1253 '{if(bookmarks, "{bookmarks} ")}'
1257 '{if(bookmarks, "{bookmarks} ")}'
1254 '{ifeq(branch, "default", "", "{branch} ")}'
1258 '{ifeq(branch, "default", "", "{branch} ")}'
1255 '- {author|user}: {desc|firstline}')
1259 '- {author|user}: {desc|firstline}')
1256 )
1260 )
1257 coreconfigitem('ui', 'message-output',
1261 coreconfigitem('ui', 'message-output',
1258 default='stdio',
1262 default='stdio',
1259 )
1263 )
1260 coreconfigitem('ui', 'nontty',
1264 coreconfigitem('ui', 'nontty',
1261 default=False,
1265 default=False,
1262 )
1266 )
1263 coreconfigitem('ui', 'origbackuppath',
1267 coreconfigitem('ui', 'origbackuppath',
1264 default=None,
1268 default=None,
1265 )
1269 )
1266 coreconfigitem('ui', 'paginate',
1270 coreconfigitem('ui', 'paginate',
1267 default=True,
1271 default=True,
1268 )
1272 )
1269 coreconfigitem('ui', 'patch',
1273 coreconfigitem('ui', 'patch',
1270 default=None,
1274 default=None,
1271 )
1275 )
1272 coreconfigitem('ui', 'pre-merge-tool-output-template',
1276 coreconfigitem('ui', 'pre-merge-tool-output-template',
1273 default=None,
1277 default=None,
1274 )
1278 )
1275 coreconfigitem('ui', 'portablefilenames',
1279 coreconfigitem('ui', 'portablefilenames',
1276 default='warn',
1280 default='warn',
1277 )
1281 )
1278 coreconfigitem('ui', 'promptecho',
1282 coreconfigitem('ui', 'promptecho',
1279 default=False,
1283 default=False,
1280 )
1284 )
1281 coreconfigitem('ui', 'quiet',
1285 coreconfigitem('ui', 'quiet',
1282 default=False,
1286 default=False,
1283 )
1287 )
1284 coreconfigitem('ui', 'quietbookmarkmove',
1288 coreconfigitem('ui', 'quietbookmarkmove',
1285 default=False,
1289 default=False,
1286 )
1290 )
1287 coreconfigitem('ui', 'relative-paths',
1291 coreconfigitem('ui', 'relative-paths',
1288 default='legacy',
1292 default='legacy',
1289 )
1293 )
1290 coreconfigitem('ui', 'remotecmd',
1294 coreconfigitem('ui', 'remotecmd',
1291 default='hg',
1295 default='hg',
1292 )
1296 )
1293 coreconfigitem('ui', 'report_untrusted',
1297 coreconfigitem('ui', 'report_untrusted',
1294 default=True,
1298 default=True,
1295 )
1299 )
1296 coreconfigitem('ui', 'rollback',
1300 coreconfigitem('ui', 'rollback',
1297 default=True,
1301 default=True,
1298 )
1302 )
1299 coreconfigitem('ui', 'signal-safe-lock',
1303 coreconfigitem('ui', 'signal-safe-lock',
1300 default=True,
1304 default=True,
1301 )
1305 )
1302 coreconfigitem('ui', 'slash',
1306 coreconfigitem('ui', 'slash',
1303 default=False,
1307 default=False,
1304 )
1308 )
1305 coreconfigitem('ui', 'ssh',
1309 coreconfigitem('ui', 'ssh',
1306 default='ssh',
1310 default='ssh',
1307 )
1311 )
1308 coreconfigitem('ui', 'ssherrorhint',
1312 coreconfigitem('ui', 'ssherrorhint',
1309 default=None,
1313 default=None,
1310 )
1314 )
1311 coreconfigitem('ui', 'statuscopies',
1315 coreconfigitem('ui', 'statuscopies',
1312 default=False,
1316 default=False,
1313 )
1317 )
1314 coreconfigitem('ui', 'strict',
1318 coreconfigitem('ui', 'strict',
1315 default=False,
1319 default=False,
1316 )
1320 )
1317 coreconfigitem('ui', 'style',
1321 coreconfigitem('ui', 'style',
1318 default='',
1322 default='',
1319 )
1323 )
1320 coreconfigitem('ui', 'supportcontact',
1324 coreconfigitem('ui', 'supportcontact',
1321 default=None,
1325 default=None,
1322 )
1326 )
1323 coreconfigitem('ui', 'textwidth',
1327 coreconfigitem('ui', 'textwidth',
1324 default=78,
1328 default=78,
1325 )
1329 )
1326 coreconfigitem('ui', 'timeout',
1330 coreconfigitem('ui', 'timeout',
1327 default='600',
1331 default='600',
1328 )
1332 )
1329 coreconfigitem('ui', 'timeout.warn',
1333 coreconfigitem('ui', 'timeout.warn',
1330 default=0,
1334 default=0,
1331 )
1335 )
1332 coreconfigitem('ui', 'traceback',
1336 coreconfigitem('ui', 'traceback',
1333 default=False,
1337 default=False,
1334 )
1338 )
1335 coreconfigitem('ui', 'tweakdefaults',
1339 coreconfigitem('ui', 'tweakdefaults',
1336 default=False,
1340 default=False,
1337 )
1341 )
1338 coreconfigitem('ui', 'username',
1342 coreconfigitem('ui', 'username',
1339 alias=[('ui', 'user')]
1343 alias=[('ui', 'user')]
1340 )
1344 )
1341 coreconfigitem('ui', 'verbose',
1345 coreconfigitem('ui', 'verbose',
1342 default=False,
1346 default=False,
1343 )
1347 )
1344 coreconfigitem('verify', 'skipflags',
1348 coreconfigitem('verify', 'skipflags',
1345 default=None,
1349 default=None,
1346 )
1350 )
1347 coreconfigitem('web', 'allowbz2',
1351 coreconfigitem('web', 'allowbz2',
1348 default=False,
1352 default=False,
1349 )
1353 )
1350 coreconfigitem('web', 'allowgz',
1354 coreconfigitem('web', 'allowgz',
1351 default=False,
1355 default=False,
1352 )
1356 )
1353 coreconfigitem('web', 'allow-pull',
1357 coreconfigitem('web', 'allow-pull',
1354 alias=[('web', 'allowpull')],
1358 alias=[('web', 'allowpull')],
1355 default=True,
1359 default=True,
1356 )
1360 )
1357 coreconfigitem('web', 'allow-push',
1361 coreconfigitem('web', 'allow-push',
1358 alias=[('web', 'allow_push')],
1362 alias=[('web', 'allow_push')],
1359 default=list,
1363 default=list,
1360 )
1364 )
1361 coreconfigitem('web', 'allowzip',
1365 coreconfigitem('web', 'allowzip',
1362 default=False,
1366 default=False,
1363 )
1367 )
1364 coreconfigitem('web', 'archivesubrepos',
1368 coreconfigitem('web', 'archivesubrepos',
1365 default=False,
1369 default=False,
1366 )
1370 )
1367 coreconfigitem('web', 'cache',
1371 coreconfigitem('web', 'cache',
1368 default=True,
1372 default=True,
1369 )
1373 )
1370 coreconfigitem('web', 'comparisoncontext',
1374 coreconfigitem('web', 'comparisoncontext',
1371 default=5,
1375 default=5,
1372 )
1376 )
1373 coreconfigitem('web', 'contact',
1377 coreconfigitem('web', 'contact',
1374 default=None,
1378 default=None,
1375 )
1379 )
1376 coreconfigitem('web', 'deny_push',
1380 coreconfigitem('web', 'deny_push',
1377 default=list,
1381 default=list,
1378 )
1382 )
1379 coreconfigitem('web', 'guessmime',
1383 coreconfigitem('web', 'guessmime',
1380 default=False,
1384 default=False,
1381 )
1385 )
1382 coreconfigitem('web', 'hidden',
1386 coreconfigitem('web', 'hidden',
1383 default=False,
1387 default=False,
1384 )
1388 )
1385 coreconfigitem('web', 'labels',
1389 coreconfigitem('web', 'labels',
1386 default=list,
1390 default=list,
1387 )
1391 )
1388 coreconfigitem('web', 'logoimg',
1392 coreconfigitem('web', 'logoimg',
1389 default='hglogo.png',
1393 default='hglogo.png',
1390 )
1394 )
1391 coreconfigitem('web', 'logourl',
1395 coreconfigitem('web', 'logourl',
1392 default='https://mercurial-scm.org/',
1396 default='https://mercurial-scm.org/',
1393 )
1397 )
1394 coreconfigitem('web', 'accesslog',
1398 coreconfigitem('web', 'accesslog',
1395 default='-',
1399 default='-',
1396 )
1400 )
1397 coreconfigitem('web', 'address',
1401 coreconfigitem('web', 'address',
1398 default='',
1402 default='',
1399 )
1403 )
1400 coreconfigitem('web', 'allow-archive',
1404 coreconfigitem('web', 'allow-archive',
1401 alias=[('web', 'allow_archive')],
1405 alias=[('web', 'allow_archive')],
1402 default=list,
1406 default=list,
1403 )
1407 )
1404 coreconfigitem('web', 'allow_read',
1408 coreconfigitem('web', 'allow_read',
1405 default=list,
1409 default=list,
1406 )
1410 )
1407 coreconfigitem('web', 'baseurl',
1411 coreconfigitem('web', 'baseurl',
1408 default=None,
1412 default=None,
1409 )
1413 )
1410 coreconfigitem('web', 'cacerts',
1414 coreconfigitem('web', 'cacerts',
1411 default=None,
1415 default=None,
1412 )
1416 )
1413 coreconfigitem('web', 'certificate',
1417 coreconfigitem('web', 'certificate',
1414 default=None,
1418 default=None,
1415 )
1419 )
1416 coreconfigitem('web', 'collapse',
1420 coreconfigitem('web', 'collapse',
1417 default=False,
1421 default=False,
1418 )
1422 )
1419 coreconfigitem('web', 'csp',
1423 coreconfigitem('web', 'csp',
1420 default=None,
1424 default=None,
1421 )
1425 )
1422 coreconfigitem('web', 'deny_read',
1426 coreconfigitem('web', 'deny_read',
1423 default=list,
1427 default=list,
1424 )
1428 )
1425 coreconfigitem('web', 'descend',
1429 coreconfigitem('web', 'descend',
1426 default=True,
1430 default=True,
1427 )
1431 )
1428 coreconfigitem('web', 'description',
1432 coreconfigitem('web', 'description',
1429 default="",
1433 default="",
1430 )
1434 )
1431 coreconfigitem('web', 'encoding',
1435 coreconfigitem('web', 'encoding',
1432 default=lambda: encoding.encoding,
1436 default=lambda: encoding.encoding,
1433 )
1437 )
1434 coreconfigitem('web', 'errorlog',
1438 coreconfigitem('web', 'errorlog',
1435 default='-',
1439 default='-',
1436 )
1440 )
1437 coreconfigitem('web', 'ipv6',
1441 coreconfigitem('web', 'ipv6',
1438 default=False,
1442 default=False,
1439 )
1443 )
1440 coreconfigitem('web', 'maxchanges',
1444 coreconfigitem('web', 'maxchanges',
1441 default=10,
1445 default=10,
1442 )
1446 )
1443 coreconfigitem('web', 'maxfiles',
1447 coreconfigitem('web', 'maxfiles',
1444 default=10,
1448 default=10,
1445 )
1449 )
1446 coreconfigitem('web', 'maxshortchanges',
1450 coreconfigitem('web', 'maxshortchanges',
1447 default=60,
1451 default=60,
1448 )
1452 )
1449 coreconfigitem('web', 'motd',
1453 coreconfigitem('web', 'motd',
1450 default='',
1454 default='',
1451 )
1455 )
1452 coreconfigitem('web', 'name',
1456 coreconfigitem('web', 'name',
1453 default=dynamicdefault,
1457 default=dynamicdefault,
1454 )
1458 )
1455 coreconfigitem('web', 'port',
1459 coreconfigitem('web', 'port',
1456 default=8000,
1460 default=8000,
1457 )
1461 )
1458 coreconfigitem('web', 'prefix',
1462 coreconfigitem('web', 'prefix',
1459 default='',
1463 default='',
1460 )
1464 )
1461 coreconfigitem('web', 'push_ssl',
1465 coreconfigitem('web', 'push_ssl',
1462 default=True,
1466 default=True,
1463 )
1467 )
1464 coreconfigitem('web', 'refreshinterval',
1468 coreconfigitem('web', 'refreshinterval',
1465 default=20,
1469 default=20,
1466 )
1470 )
1467 coreconfigitem('web', 'server-header',
1471 coreconfigitem('web', 'server-header',
1468 default=None,
1472 default=None,
1469 )
1473 )
1470 coreconfigitem('web', 'static',
1474 coreconfigitem('web', 'static',
1471 default=None,
1475 default=None,
1472 )
1476 )
1473 coreconfigitem('web', 'staticurl',
1477 coreconfigitem('web', 'staticurl',
1474 default=None,
1478 default=None,
1475 )
1479 )
1476 coreconfigitem('web', 'stripes',
1480 coreconfigitem('web', 'stripes',
1477 default=1,
1481 default=1,
1478 )
1482 )
1479 coreconfigitem('web', 'style',
1483 coreconfigitem('web', 'style',
1480 default='paper',
1484 default='paper',
1481 )
1485 )
1482 coreconfigitem('web', 'templates',
1486 coreconfigitem('web', 'templates',
1483 default=None,
1487 default=None,
1484 )
1488 )
1485 coreconfigitem('web', 'view',
1489 coreconfigitem('web', 'view',
1486 default='served',
1490 default='served',
1487 experimental=True,
1491 experimental=True,
1488 )
1492 )
1489 coreconfigitem('worker', 'backgroundclose',
1493 coreconfigitem('worker', 'backgroundclose',
1490 default=dynamicdefault,
1494 default=dynamicdefault,
1491 )
1495 )
1492 # Windows defaults to a limit of 512 open files. A buffer of 128
1496 # Windows defaults to a limit of 512 open files. A buffer of 128
1493 # should give us enough headway.
1497 # should give us enough headway.
1494 coreconfigitem('worker', 'backgroundclosemaxqueue',
1498 coreconfigitem('worker', 'backgroundclosemaxqueue',
1495 default=384,
1499 default=384,
1496 )
1500 )
1497 coreconfigitem('worker', 'backgroundcloseminfilecount',
1501 coreconfigitem('worker', 'backgroundcloseminfilecount',
1498 default=2048,
1502 default=2048,
1499 )
1503 )
1500 coreconfigitem('worker', 'backgroundclosethreadcount',
1504 coreconfigitem('worker', 'backgroundclosethreadcount',
1501 default=4,
1505 default=4,
1502 )
1506 )
1503 coreconfigitem('worker', 'enabled',
1507 coreconfigitem('worker', 'enabled',
1504 default=True,
1508 default=True,
1505 )
1509 )
1506 coreconfigitem('worker', 'numcpus',
1510 coreconfigitem('worker', 'numcpus',
1507 default=None,
1511 default=None,
1508 )
1512 )
1509
1513
# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
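
# Editor's note: a minimal usage sketch, not part of configitems.py. Items
# declared with coreconfigitem() supply the default that the ui config
# accessors fall back to when no hgrc sets a value; e.g. a hypothetical
# caller reading one of the items declared above:
#
#     single = repo.ui.configbool('rebase', 'singletransaction')
#     # -> False unless a config file overrides it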
@@ -1,3323 +1,3335 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == 'plain':
            return obj.vfs.join(fname)
        else:
            if location != '':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property cached

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

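# Editor's note: an illustrative sketch of a caller probing the cache without
# forcing a load, assuming 'changelog' is one of the filecache-ed properties
# declared later in this module (hypothetical caller code, not part of the
# original file):
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         pass  # reuse obj without re-reading from disk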
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

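# Editor's note: an illustrative sketch of how callers drive the executor
# interface; futures from the local executor resolve immediately
# (hypothetical caller code, not part of the original file):
#
#     with peer.commandexecutor() as e:
#         heads = e.callcommand(b'heads', {}).result()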
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# A repository with the sidedata-flag requirement allows extra information
# to be stored for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = 'exp-sidedata-flag'

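# Editor's note: a requirement string like this protects the feature by
# making the repository unreadable to clients that do not recognize it. A
# sketch of the observable effect (the exact abort wording may differ):
#
#     $ hg -R sidedata-repo log
#     abort: repository requires features unknown to this Mercurial:
#     exp-sidedata-flag!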
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

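# Editor's note: an illustrative sketch of how an extension would hook into
# featuresetupfuncs (hypothetical extension module; the requirement name is
# made up; not part of the original file):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         # tell core we know how to open repos carrying our requirement
#         features.add('exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)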
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

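    # Editor's note: for reference, a typical .hg/requires file is nothing
    # more than one token per line; the exact set varies by repository, e.g.:
    #
    #     dotencode
    #     fncache
    #     generaldelta
    #     revlogv1
    #     sparserevlog
    #     store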
471 # The .hg/hgrc file may load extensions or contain config options
475 # The .hg/hgrc file may load extensions or contain config options
472 # that influence repository construction. Attempt to load it and
476 # that influence repository construction. Attempt to load it and
473 # process any new extensions that it may have pulled in.
477 # process any new extensions that it may have pulled in.
474 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
478 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
475 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
479 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
476 extensions.loadall(ui)
480 extensions.loadall(ui)
477 extensions.populateui(ui)
481 extensions.populateui(ui)
478
482
479 # Set of module names of extensions loaded for this repository.
483 # Set of module names of extensions loaded for this repository.
480 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
484 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
481
485
482 supportedrequirements = gathersupportedrequirements(ui)
486 supportedrequirements = gathersupportedrequirements(ui)
483
487
484 # We first validate the requirements are known.
488 # We first validate the requirements are known.
485 ensurerequirementsrecognized(requirements, supportedrequirements)
489 ensurerequirementsrecognized(requirements, supportedrequirements)
486
490
487 # Then we validate that the known set is reasonable to use together.
491 # Then we validate that the known set is reasonable to use together.
488 ensurerequirementscompatible(ui, requirements)
492 ensurerequirementscompatible(ui, requirements)
489
493
490 # TODO there are unhandled edge cases related to opening repositories with
494 # TODO there are unhandled edge cases related to opening repositories with
491 # shared storage. If storage is shared, we should also test for requirements
495 # shared storage. If storage is shared, we should also test for requirements
492 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
496 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
493 # that repo, as that repo may load extensions needed to open it. This is a
497 # that repo, as that repo may load extensions needed to open it. This is a
494 # bit complicated because we don't want the other hgrc to overwrite settings
498 # bit complicated because we don't want the other hgrc to overwrite settings
495 # in this hgrc.
499 # in this hgrc.
496 #
500 #
497 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
501 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
498 # file when sharing repos. But if a requirement is added after the share is
502 # file when sharing repos. But if a requirement is added after the share is
499 # performed, thereby introducing a new requirement for the opener, we may
503 # performed, thereby introducing a new requirement for the opener, we may
500 # will not see that and could encounter a run-time error interacting with
504 # will not see that and could encounter a run-time error interacting with
501 # that shared store since it has an unknown-to-us requirement.
505 # that shared store since it has an unknown-to-us requirement.
502
506
503 # At this point, we know we should be capable of opening the repository.
507 # At this point, we know we should be capable of opening the repository.
504 # Now get on with doing that.
508 # Now get on with doing that.
505
509
506 features = set()
510 features = set()
507
511
508 # The "store" part of the repository holds versioned data. How it is
512 # The "store" part of the repository holds versioned data. How it is
509 # accessed is determined by various requirements. The ``shared`` or
513 # accessed is determined by various requirements. The ``shared`` or
510 # ``relshared`` requirements indicate the store lives in the path contained
514 # ``relshared`` requirements indicate the store lives in the path contained
511 # in the ``.hg/sharedpath`` file. This is an absolute path for
515 # in the ``.hg/sharedpath`` file. This is an absolute path for
512 # ``shared`` and relative to ``.hg/`` for ``relshared``.
516 # ``shared`` and relative to ``.hg/`` for ``relshared``.
513 if b'shared' in requirements or b'relshared' in requirements:
517 if b'shared' in requirements or b'relshared' in requirements:
514 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
518 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
515 if b'relshared' in requirements:
519 if b'relshared' in requirements:
516 sharedpath = hgvfs.join(sharedpath)
520 sharedpath = hgvfs.join(sharedpath)
517
521
518 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
522 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
519
523
520 if not sharedvfs.exists():
524 if not sharedvfs.exists():
521 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
525 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
522 b'directory %s') % sharedvfs.base)
526 b'directory %s') % sharedvfs.base)
523
527
524 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
528 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
525
529
526 storebasepath = sharedvfs.base
530 storebasepath = sharedvfs.base
527 cachepath = sharedvfs.join(b'cache')
531 cachepath = sharedvfs.join(b'cache')
528 else:
532 else:
529 storebasepath = hgvfs.base
533 storebasepath = hgvfs.base
530 cachepath = hgvfs.join(b'cache')
534 cachepath = hgvfs.join(b'cache')
531 wcachepath = hgvfs.join(b'wcache')
535 wcachepath = hgvfs.join(b'wcache')
532
536
533
537
534 # The store has changed over time and the exact layout is dictated by
538 # The store has changed over time and the exact layout is dictated by
535 # requirements. The store interface abstracts differences across all
539 # requirements. The store interface abstracts differences across all
536 # of them.
540 # of them.
537 store = makestore(requirements, storebasepath,
541 store = makestore(requirements, storebasepath,
538 lambda base: vfsmod.vfs(base, cacheaudited=True))
542 lambda base: vfsmod.vfs(base, cacheaudited=True))
539 hgvfs.createmode = store.createmode
543 hgvfs.createmode = store.createmode
540
544
541 storevfs = store.vfs
545 storevfs = store.vfs
542 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
546 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
543
547
544 # The cache vfs is used to manage cache files.
548 # The cache vfs is used to manage cache files.
545 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
549 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
546 cachevfs.createmode = store.createmode
550 cachevfs.createmode = store.createmode
547 # The wcache vfs is used to manage cache files related to the working copy.
551 # The wcache vfs is used to manage cache files related to the working copy.
548 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
552 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
549 wcachevfs.createmode = store.createmode
553 wcachevfs.createmode = store.createmode
550
554
551 # Now resolve the type for the repository object. We do this by repeatedly
555 # Now resolve the type for the repository object. We do this by repeatedly
552 # calling a factory function to produce types for specific aspects of the
556 # calling a factory function to produce types for specific aspects of the
553 # repo's operation. The aggregate returned types are used as base classes
557 # repo's operation. The aggregate returned types are used as base classes
554 # for a dynamically-derived type, which will represent our new repository.
558 # for a dynamically-derived type, which will represent our new repository.
555
559
556 bases = []
560 bases = []
557 extrastate = {}
561 extrastate = {}
558
562
559 for iface, fn in REPO_INTERFACES:
563 for iface, fn in REPO_INTERFACES:
560 # We pass all potentially useful state to give extensions tons of
564 # We pass all potentially useful state to give extensions tons of
561 # flexibility.
565 # flexibility.
562 typ = fn()(ui=ui,
566 typ = fn()(ui=ui,
563 intents=intents,
567 intents=intents,
564 requirements=requirements,
568 requirements=requirements,
565 features=features,
569 features=features,
566 wdirvfs=wdirvfs,
570 wdirvfs=wdirvfs,
567 hgvfs=hgvfs,
571 hgvfs=hgvfs,
568 store=store,
572 store=store,
569 storevfs=storevfs,
573 storevfs=storevfs,
570 storeoptions=storevfs.options,
574 storeoptions=storevfs.options,
571 cachevfs=cachevfs,
575 cachevfs=cachevfs,
572 wcachevfs=wcachevfs,
576 wcachevfs=wcachevfs,
573 extensionmodulenames=extensionmodulenames,
577 extensionmodulenames=extensionmodulenames,
574 extrastate=extrastate,
578 extrastate=extrastate,
575 baseclasses=bases)
579 baseclasses=bases)
576
580
577 if not isinstance(typ, type):
581 if not isinstance(typ, type):
578 raise error.ProgrammingError('unable to construct type for %s' %
582 raise error.ProgrammingError('unable to construct type for %s' %
579 iface)
583 iface)
580
584
581 bases.append(typ)
585 bases.append(typ)
582
586
583 # type() allows you to use characters in type names that wouldn't be
587 # type() allows you to use characters in type names that wouldn't be
584 # recognized as Python symbols in source code. We abuse that to add
588 # recognized as Python symbols in source code. We abuse that to add
585 # rich information about our constructed repo.
589 # rich information about our constructed repo.
586 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
590 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
587 wdirvfs.base,
591 wdirvfs.base,
588 b','.join(sorted(requirements))))
592 b','.join(sorted(requirements))))
589
593
590 cls = type(name, tuple(bases), {})
594 cls = type(name, tuple(bases), {})
591
595
592 return cls(
596 return cls(
593 baseui=baseui,
597 baseui=baseui,
594 ui=ui,
598 ui=ui,
595 origroot=path,
599 origroot=path,
596 wdirvfs=wdirvfs,
600 wdirvfs=wdirvfs,
597 hgvfs=hgvfs,
601 hgvfs=hgvfs,
598 requirements=requirements,
602 requirements=requirements,
599 supportedrequirements=supportedrequirements,
603 supportedrequirements=supportedrequirements,
600 sharedpath=storebasepath,
604 sharedpath=storebasepath,
601 store=store,
605 store=store,
602 cachevfs=cachevfs,
606 cachevfs=cachevfs,
603 wcachevfs=wcachevfs,
607 wcachevfs=wcachevfs,
604 features=features,
608 features=features,
605 intents=intents)
609 intents=intents)
606
610
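# Editor's illustration (hypothetical helper, not part of this change): a
# minimal standalone sketch of the dynamic type derivation performed in
# makelocalrepository() above.
def _example_derivedtype():
    """Show how aggregated base classes become one repository type."""
    class _main(object):
        def commit(self):
            return 'commit'

    class _filestorage(object):
        def file(self, path):
            return 'filelog:%s' % path

    # type() accepts a display name that need not be a valid Python
    # identifier, which the code above exploits to embed the repo path and
    # sorted requirements into the class name.
    bases = (_main, _filestorage)
    cls = type('derivedrepo:/path/to/repo<store,fncache>', bases, {})
    repo = cls()
    assert repo.commit() == 'commit'
    assert repo.file('x') == 'filelog:x'
    return repo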
607 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
611 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
608 """Load hgrc files/content into a ui instance.
612 """Load hgrc files/content into a ui instance.
609
613
610 This is called during repository opening to load any additional
614 This is called during repository opening to load any additional
611 config files or settings relevant to the current repository.
615 config files or settings relevant to the current repository.
612
616
613 Returns a bool indicating whether any additional configs were loaded.
617 Returns a bool indicating whether any additional configs were loaded.
614
618
615 Extensions should monkeypatch this function to modify how per-repo
619 Extensions should monkeypatch this function to modify how per-repo
616 configs are loaded. For example, an extension may wish to pull in
620 configs are loaded. For example, an extension may wish to pull in
617 configs from alternate files or sources.
621 configs from alternate files or sources.
618 """
622 """
619 try:
623 try:
620 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
624 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
621 return True
625 return True
622 except IOError:
626 except IOError:
623 return False
627 return False
624
628
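# Editor's sketch (hypothetical extension code, file name invented): the
# monkeypatching suggested in the docstring above is commonly done with
# extensions.wrapfunction, e.g. to pull settings from an extra file:
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)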
625 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
629 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
626 """Perform additional actions after .hg/hgrc is loaded.
630 """Perform additional actions after .hg/hgrc is loaded.
627
631
628 This function is called during repository loading immediately after
632 This function is called during repository loading immediately after
629 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
633 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
630
634
631 The function can be used to validate configs, automatically add
635 The function can be used to validate configs, automatically add
632 options (including extensions) based on requirements, etc.
636 options (including extensions) based on requirements, etc.
633 """
637 """
634
638
635 # Map of requirements to list of extensions to load automatically when
639 # Map of requirements to list of extensions to load automatically when
636 # requirement is present.
640 # requirement is present.
637 autoextensions = {
641 autoextensions = {
638 b'largefiles': [b'largefiles'],
642 b'largefiles': [b'largefiles'],
639 b'lfs': [b'lfs'],
643 b'lfs': [b'lfs'],
640 }
644 }
641
645
642 for requirement, names in sorted(autoextensions.items()):
646 for requirement, names in sorted(autoextensions.items()):
643 if requirement not in requirements:
647 if requirement not in requirements:
644 continue
648 continue
645
649
646 for name in names:
650 for name in names:
647 if not ui.hasconfig(b'extensions', name):
651 if not ui.hasconfig(b'extensions', name):
648 ui.setconfig(b'extensions', name, b'', source='autoload')
652 ui.setconfig(b'extensions', name, b'', source='autoload')
649
653
650 def gathersupportedrequirements(ui):
654 def gathersupportedrequirements(ui):
651 """Determine the complete set of recognized requirements."""
655 """Determine the complete set of recognized requirements."""
652 # Start with all requirements supported by this file.
656 # Start with all requirements supported by this file.
653 supported = set(localrepository._basesupported)
657 supported = set(localrepository._basesupported)
654
658
655 # Execute ``featuresetupfuncs`` entries if they belong to an extension
659 # Execute ``featuresetupfuncs`` entries if they belong to an extension
656 # relevant to this ui instance.
660 # relevant to this ui instance.
657 modules = {m.__name__ for n, m in extensions.extensions(ui)}
661 modules = {m.__name__ for n, m in extensions.extensions(ui)}
658
662
659 for fn in featuresetupfuncs:
663 for fn in featuresetupfuncs:
660 if fn.__module__ in modules:
664 if fn.__module__ in modules:
661 fn(ui, supported)
665 fn(ui, supported)
662
666
663 # Add derived requirements from registered compression engines.
667 # Add derived requirements from registered compression engines.
664 for name in util.compengines:
668 for name in util.compengines:
665 engine = util.compengines[name]
669 engine = util.compengines[name]
666 if engine.available() and engine.revlogheader():
670 if engine.available() and engine.revlogheader():
667 supported.add(b'exp-compression-%s' % name)
671 supported.add(b'exp-compression-%s' % name)
668 if engine.name() == 'zstd':
672 if engine.name() == 'zstd':
669 supported.add(b'revlog-compression-zstd')
673 supported.add(b'revlog-compression-zstd')
670
674
671 return supported
675 return supported
672
676
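# Editor's sketch (hypothetical extension code): a featuresetup function is
# registered on the module-level ``featuresetupfuncs`` set consumed above,
# and only runs when its defining extension is enabled for this ui:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)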
673 def ensurerequirementsrecognized(requirements, supported):
677 def ensurerequirementsrecognized(requirements, supported):
674 """Validate that a set of local requirements is recognized.
678 """Validate that a set of local requirements is recognized.
675
679
676 Receives a set of requirements. Raises an ``error.RepoError`` if there
680 Receives a set of requirements. Raises an ``error.RepoError`` if there
677 exists any requirement in that set that currently loaded code doesn't
681 exists any requirement in that set that currently loaded code doesn't
678 recognize.
682 recognize.
679
683
680 Returns None if every requirement is recognized.
684 Returns None if every requirement is recognized.
681 """
685 """
682 missing = set()
686 missing = set()
683
687
684 for requirement in requirements:
688 for requirement in requirements:
685 if requirement in supported:
689 if requirement in supported:
686 continue
690 continue
687
691
688 if not requirement or not requirement[0:1].isalnum():
692 if not requirement or not requirement[0:1].isalnum():
689 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
693 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
690
694
691 missing.add(requirement)
695 missing.add(requirement)
692
696
693 if missing:
697 if missing:
694 raise error.RequirementError(
698 raise error.RequirementError(
695 _(b'repository requires features unknown to this Mercurial: %s') %
699 _(b'repository requires features unknown to this Mercurial: %s') %
696 b' '.join(sorted(missing)),
700 b' '.join(sorted(missing)),
697 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
701 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
698 b'for more information'))
702 b'for more information'))
699
703
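# Editor's note (illustrative values): given requirements
# {b'store', b'exp-unknown'} and supported {b'store', b'fncache'}, the
# function above raises error.RequirementError naming 'exp-unknown';
# with requirements {b'store'} alone it returns None.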
700 def ensurerequirementscompatible(ui, requirements):
704 def ensurerequirementscompatible(ui, requirements):
701 """Validates that a set of recognized requirements is mutually compatible.
705 """Validates that a set of recognized requirements is mutually compatible.
702
706
703 Some requirements may not be compatible with others or require
707 Some requirements may not be compatible with others or require
704 config options that aren't enabled. This function is called during
708 config options that aren't enabled. This function is called during
705 repository opening to ensure that the set of requirements needed
709 repository opening to ensure that the set of requirements needed
706 to open a repository is sane and compatible with config options.
710 to open a repository is sane and compatible with config options.
707
711
708 Extensions can monkeypatch this function to perform additional
712 Extensions can monkeypatch this function to perform additional
709 checking.
713 checking.
710
714
711 ``error.RepoError`` should be raised on failure.
715 ``error.RepoError`` should be raised on failure.
712 """
716 """
713 if b'exp-sparse' in requirements and not sparse.enabled:
717 if b'exp-sparse' in requirements and not sparse.enabled:
714 raise error.RepoError(_(b'repository is using sparse feature but '
718 raise error.RepoError(_(b'repository is using sparse feature but '
715 b'sparse is not enabled; enable the '
719 b'sparse is not enabled; enable the '
716 b'"sparse" extensions to access'))
720 b'"sparse" extensions to access'))
717
721
718 def makestore(requirements, path, vfstype):
722 def makestore(requirements, path, vfstype):
719 """Construct a storage object for a repository."""
723 """Construct a storage object for a repository."""
720 if b'store' in requirements:
724 if b'store' in requirements:
721 if b'fncache' in requirements:
725 if b'fncache' in requirements:
722 return storemod.fncachestore(path, vfstype,
726 return storemod.fncachestore(path, vfstype,
723 b'dotencode' in requirements)
727 b'dotencode' in requirements)
724
728
725 return storemod.encodedstore(path, vfstype)
729 return storemod.encodedstore(path, vfstype)
726
730
727 return storemod.basicstore(path, vfstype)
731 return storemod.basicstore(path, vfstype)
728
732
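# Editor's summary (informal) of the decision tree above:
#     'store' + 'fncache' -> fncachestore (honoring 'dotencode')
#     'store' only        -> encodedstore
#     neither             -> basicstore (very old layouts)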
729 def resolvestorevfsoptions(ui, requirements, features):
733 def resolvestorevfsoptions(ui, requirements, features):
730 """Resolve the options to pass to the store vfs opener.
734 """Resolve the options to pass to the store vfs opener.
731
735
732 The returned dict is used to influence behavior of the storage layer.
736 The returned dict is used to influence behavior of the storage layer.
733 """
737 """
734 options = {}
738 options = {}
735
739
736 if b'treemanifest' in requirements:
740 if b'treemanifest' in requirements:
737 options[b'treemanifest'] = True
741 options[b'treemanifest'] = True
738
742
739 # experimental config: format.manifestcachesize
743 # experimental config: format.manifestcachesize
740 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
744 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
741 if manifestcachesize is not None:
745 if manifestcachesize is not None:
742 options[b'manifestcachesize'] = manifestcachesize
746 options[b'manifestcachesize'] = manifestcachesize
743
747
744 # In the absence of another requirement superseding a revlog-related
748 # In the absence of another requirement superseding a revlog-related
745 # requirement, we have to assume the repo is using revlog version 0.
749 # requirement, we have to assume the repo is using revlog version 0.
746 # This revlog format is super old and we don't bother trying to parse
750 # This revlog format is super old and we don't bother trying to parse
747 # opener options for it because those options wouldn't do anything
751 # opener options for it because those options wouldn't do anything
748 # meaningful on such old repos.
752 # meaningful on such old repos.
749 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
753 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
750 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
754 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
751 else: # explicitly mark repo as using revlogv0
755 else: # explicitly mark repo as using revlogv0
752 options['revlogv0'] = True
756 options['revlogv0'] = True
753
757
754 writecopiesto = ui.config('experimental', 'copies.write-to')
758 writecopiesto = ui.config('experimental', 'copies.write-to')
755 copiesextramode = ('changeset-only', 'compatibility')
759 copiesextramode = ('changeset-only', 'compatibility')
756 if (writecopiesto in copiesextramode):
760 if (writecopiesto in copiesextramode):
757 options['copies-storage'] = 'extra'
761 options['copies-storage'] = 'extra'
758
762
759 return options
763 return options
760
764
761 def resolverevlogstorevfsoptions(ui, requirements, features):
765 def resolverevlogstorevfsoptions(ui, requirements, features):
762 """Resolve opener options specific to revlogs."""
766 """Resolve opener options specific to revlogs."""
763
767
764 options = {}
768 options = {}
765 options[b'flagprocessors'] = {}
769 options[b'flagprocessors'] = {}
766
770
767 if b'revlogv1' in requirements:
771 if b'revlogv1' in requirements:
768 options[b'revlogv1'] = True
772 options[b'revlogv1'] = True
769 if REVLOGV2_REQUIREMENT in requirements:
773 if REVLOGV2_REQUIREMENT in requirements:
770 options[b'revlogv2'] = True
774 options[b'revlogv2'] = True
771
775
772 if b'generaldelta' in requirements:
776 if b'generaldelta' in requirements:
773 options[b'generaldelta'] = True
777 options[b'generaldelta'] = True
774
778
775 # experimental config: format.chunkcachesize
779 # experimental config: format.chunkcachesize
776 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
780 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
777 if chunkcachesize is not None:
781 if chunkcachesize is not None:
778 options[b'chunkcachesize'] = chunkcachesize
782 options[b'chunkcachesize'] = chunkcachesize
779
783
780 deltabothparents = ui.configbool(b'storage',
784 deltabothparents = ui.configbool(b'storage',
781 b'revlog.optimize-delta-parent-choice')
785 b'revlog.optimize-delta-parent-choice')
782 options[b'deltabothparents'] = deltabothparents
786 options[b'deltabothparents'] = deltabothparents
783
787
784 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
788 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
785 lazydeltabase = False
789 lazydeltabase = False
786 if lazydelta:
790 if lazydelta:
787 lazydeltabase = ui.configbool(b'storage',
791 lazydeltabase = ui.configbool(b'storage',
788 b'revlog.reuse-external-delta-parent')
792 b'revlog.reuse-external-delta-parent')
789 if lazydeltabase is None:
793 if lazydeltabase is None:
790 lazydeltabase = not scmutil.gddeltaconfig(ui)
794 lazydeltabase = not scmutil.gddeltaconfig(ui)
791 options[b'lazydelta'] = lazydelta
795 options[b'lazydelta'] = lazydelta
792 options[b'lazydeltabase'] = lazydeltabase
796 options[b'lazydeltabase'] = lazydeltabase
793
797
794 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
798 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
795 if 0 <= chainspan:
799 if 0 <= chainspan:
796 options[b'maxdeltachainspan'] = chainspan
800 options[b'maxdeltachainspan'] = chainspan
797
801
798 mmapindexthreshold = ui.configbytes(b'experimental',
802 mmapindexthreshold = ui.configbytes(b'experimental',
799 b'mmapindexthreshold')
803 b'mmapindexthreshold')
800 if mmapindexthreshold is not None:
804 if mmapindexthreshold is not None:
801 options[b'mmapindexthreshold'] = mmapindexthreshold
805 options[b'mmapindexthreshold'] = mmapindexthreshold
802
806
803 withsparseread = ui.configbool(b'experimental', b'sparse-read')
807 withsparseread = ui.configbool(b'experimental', b'sparse-read')
804 srdensitythres = float(ui.config(b'experimental',
808 srdensitythres = float(ui.config(b'experimental',
805 b'sparse-read.density-threshold'))
809 b'sparse-read.density-threshold'))
806 srmingapsize = ui.configbytes(b'experimental',
810 srmingapsize = ui.configbytes(b'experimental',
807 b'sparse-read.min-gap-size')
811 b'sparse-read.min-gap-size')
808 options[b'with-sparse-read'] = withsparseread
812 options[b'with-sparse-read'] = withsparseread
809 options[b'sparse-read-density-threshold'] = srdensitythres
813 options[b'sparse-read-density-threshold'] = srdensitythres
810 options[b'sparse-read-min-gap-size'] = srmingapsize
814 options[b'sparse-read-min-gap-size'] = srmingapsize
811
815
812 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
816 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
813 options[b'sparse-revlog'] = sparserevlog
817 options[b'sparse-revlog'] = sparserevlog
814 if sparserevlog:
818 if sparserevlog:
815 options[b'generaldelta'] = True
819 options[b'generaldelta'] = True
816
820
821 sidedata = SIDEDATA_REQUIREMENT in requirements
822 options[b'side-data'] = sidedata
823
817 maxchainlen = None
824 maxchainlen = None
818 if sparserevlog:
825 if sparserevlog:
819 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
826 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
820 # experimental config: format.maxchainlen
827 # experimental config: format.maxchainlen
821 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
828 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
822 if maxchainlen is not None:
829 if maxchainlen is not None:
823 options[b'maxchainlen'] = maxchainlen
830 options[b'maxchainlen'] = maxchainlen
824
831
825 for r in requirements:
832 for r in requirements:
826 # we allow multiple compression engine requirements to co-exist because
833 # we allow multiple compression engine requirements to co-exist because
827 # strictly speaking, revlog seems to support mixed compression styles.
834 # strictly speaking, revlog seems to support mixed compression styles.
828 #
835 #
829 # The compression used for new entries will be "the last one"
836 # The compression used for new entries will be "the last one"
830 prefix = r.startswith
837 prefix = r.startswith
831 if prefix('revlog-compression-') or prefix('exp-compression-'):
838 if prefix('revlog-compression-') or prefix('exp-compression-'):
832 options[b'compengine'] = r.split('-', 2)[2]
839 options[b'compengine'] = r.split('-', 2)[2]
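# Editor's note (worked example): for the requirement
# b'revlog-compression-zstd', r.split('-', 2)[2] yields b'zstd'; since the
# loop keeps overwriting the option, the last matching requirement wins.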
833
840
834 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
841 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
835 if options[b'zlib.level'] is not None:
842 if options[b'zlib.level'] is not None:
836 if not (0 <= options[b'zlib.level'] <= 9):
843 if not (0 <= options[b'zlib.level'] <= 9):
837 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
844 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
838 raise error.Abort(msg % options[b'zlib.level'])
845 raise error.Abort(msg % options[b'zlib.level'])
839 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
846 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
840 if options[b'zstd.level'] is not None:
847 if options[b'zstd.level'] is not None:
841 if not (0 <= options[b'zstd.level'] <= 22):
848 if not (0 <= options[b'zstd.level'] <= 22):
842 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
849 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
843 raise error.Abort(msg % options[b'zstd.level'])
850 raise error.Abort(msg % options[b'zstd.level'])
844
851
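# Editor's note (illustrative hgrc snippet exercising the checks above):
#
#     [storage]
#     revlog.zlib.level = 6
#     revlog.zstd.level = 10
#
# Values outside 0-9 (zlib) or 0-22 (zstd) abort repository opening.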
845 if repository.NARROW_REQUIREMENT in requirements:
852 if repository.NARROW_REQUIREMENT in requirements:
846 options[b'enableellipsis'] = True
853 options[b'enableellipsis'] = True
847
854
848 return options
855 return options
849
856
850 def makemain(**kwargs):
857 def makemain(**kwargs):
851 """Produce a type conforming to ``ilocalrepositorymain``."""
858 """Produce a type conforming to ``ilocalrepositorymain``."""
852 return localrepository
859 return localrepository
853
860
854 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
861 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
855 class revlogfilestorage(object):
862 class revlogfilestorage(object):
856 """File storage when using revlogs."""
863 """File storage when using revlogs."""
857
864
858 def file(self, path):
865 def file(self, path):
859 if path[0] == b'/':
866 if path[0] == b'/':
860 path = path[1:]
867 path = path[1:]
861
868
862 return filelog.filelog(self.svfs, path)
869 return filelog.filelog(self.svfs, path)
863
870
864 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
871 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
865 class revlognarrowfilestorage(object):
872 class revlognarrowfilestorage(object):
866 """File storage when using revlogs and narrow files."""
873 """File storage when using revlogs and narrow files."""
867
874
868 def file(self, path):
875 def file(self, path):
869 if path[0] == b'/':
876 if path[0] == b'/':
870 path = path[1:]
877 path = path[1:]
871
878
872 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
879 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
873
880
874 def makefilestorage(requirements, features, **kwargs):
881 def makefilestorage(requirements, features, **kwargs):
875 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
882 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
876 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
883 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
877 features.add(repository.REPO_FEATURE_STREAM_CLONE)
884 features.add(repository.REPO_FEATURE_STREAM_CLONE)
878
885
879 if repository.NARROW_REQUIREMENT in requirements:
886 if repository.NARROW_REQUIREMENT in requirements:
880 return revlognarrowfilestorage
887 return revlognarrowfilestorage
881 else:
888 else:
882 return revlogfilestorage
889 return revlogfilestorage
883
890
884 # List of repository interfaces and factory functions for them. Each
891 # List of repository interfaces and factory functions for them. Each
885 # will be called in order during ``makelocalrepository()`` to iteratively
892 # will be called in order during ``makelocalrepository()`` to iteratively
886 # derive the final type for a local repository instance. We capture the
893 # derive the final type for a local repository instance. We capture the
887 # function as a lambda so we don't hold a reference and the module-level
894 # function as a lambda so we don't hold a reference and the module-level
888 # functions can be wrapped.
895 # functions can be wrapped.
889 REPO_INTERFACES = [
896 REPO_INTERFACES = [
890 (repository.ilocalrepositorymain, lambda: makemain),
897 (repository.ilocalrepositorymain, lambda: makemain),
891 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
898 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
892 ]
899 ]
893
900
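# Editor's sketch (hypothetical wrapper): because REPO_INTERFACES resolves
# each factory lazily through a lambda, wrapping the module-level function
# is sufficient for extensions:
#
#     from mercurial import extensions, localrepo
#
#     def _makemain(orig, **kwargs):
#         cls = orig(**kwargs)
#         # derive or augment cls here before it is used as a base class
#         return cls
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makemain', _makemain)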
894 @interfaceutil.implementer(repository.ilocalrepositorymain)
901 @interfaceutil.implementer(repository.ilocalrepositorymain)
895 class localrepository(object):
902 class localrepository(object):
896 """Main class for representing local repositories.
903 """Main class for representing local repositories.
897
904
898 All local repositories are instances of this class.
905 All local repositories are instances of this class.
899
906
900 Constructed on its own, instances of this class are not usable as
907 Constructed on its own, instances of this class are not usable as
901 repository objects. To obtain a usable repository object, call
908 repository objects. To obtain a usable repository object, call
902 ``hg.repository()``, ``localrepo.instance()``, or
909 ``hg.repository()``, ``localrepo.instance()``, or
903 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
910 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
904 ``instance()`` adds support for creating new repositories.
911 ``instance()`` adds support for creating new repositories.
905 ``hg.repository()`` adds more extension integration, including calling
912 ``hg.repository()`` adds more extension integration, including calling
906 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
913 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
907 used.
914 used.
908 """
915 """
909
916
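# Editor's note (typical usage, per the docstring above; path invented):
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')
#
# which layers extension integration such as reposetup() on top of
# instance() and makelocalrepository().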
910 # obsolete experimental requirements:
917 # obsolete experimental requirements:
911 # - manifestv2: An experimental new manifest format that allowed
918 # - manifestv2: An experimental new manifest format that allowed
912 # for stem compression of long paths. Experiment ended up not
919 # for stem compression of long paths. Experiment ended up not
913 # being successful (repository sizes went up due to worse delta
920 # being successful (repository sizes went up due to worse delta
914 # chains), and the code was deleted in 4.6.
921 # chains), and the code was deleted in 4.6.
915 supportedformats = {
922 supportedformats = {
916 'revlogv1',
923 'revlogv1',
917 'generaldelta',
924 'generaldelta',
918 'treemanifest',
925 'treemanifest',
919 REVLOGV2_REQUIREMENT,
926 REVLOGV2_REQUIREMENT,
927 SIDEDATA_REQUIREMENT,
920 SPARSEREVLOG_REQUIREMENT,
928 SPARSEREVLOG_REQUIREMENT,
921 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
929 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
922 }
930 }
923 _basesupported = supportedformats | {
931 _basesupported = supportedformats | {
924 'store',
932 'store',
925 'fncache',
933 'fncache',
926 'shared',
934 'shared',
927 'relshared',
935 'relshared',
928 'dotencode',
936 'dotencode',
929 'exp-sparse',
937 'exp-sparse',
930 'internal-phase'
938 'internal-phase'
931 }
939 }
932
940
933 # list of prefixes for files which can be written without 'wlock'
941 # list of prefixes for files which can be written without 'wlock'
934 # Extensions should extend this list when needed
942 # Extensions should extend this list when needed
935 _wlockfreeprefix = {
943 _wlockfreeprefix = {
936 # We might consider requiring 'wlock' for the next
944 # We might consider requiring 'wlock' for the next
937 # two, but pretty much all the existing code assumes
945 # two, but pretty much all the existing code assumes
938 # wlock is not needed so we keep them excluded for
946 # wlock is not needed so we keep them excluded for
939 # now.
947 # now.
940 'hgrc',
948 'hgrc',
941 'requires',
949 'requires',
942 # XXX cache is a complicated business; someone
950 # XXX cache is a complicated business; someone
943 # should investigate this in depth at some point
951 # should investigate this in depth at some point
944 'cache/',
952 'cache/',
945 # XXX shouldn't the dirstate be covered by the wlock?
953 # XXX shouldn't the dirstate be covered by the wlock?
946 'dirstate',
954 'dirstate',
947 # XXX bisect was still a bit too messy at the time
955 # XXX bisect was still a bit too messy at the time
948 # this changeset was introduced. Someone should fix
956 # this changeset was introduced. Someone should fix
949 # the remaining bit and drop this line
957 # the remaining bit and drop this line
950 'bisect.state',
958 'bisect.state',
951 }
959 }
952
960
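# Editor's note (hypothetical extension usage): an extension that writes a
# file without taking 'wlock' would extend this set, e.g.
#
#     localrepo.localrepository._wlockfreeprefix.add('myext.state')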
953 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
961 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
954 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
962 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
955 features, intents=None):
963 features, intents=None):
956 """Create a new local repository instance.
964 """Create a new local repository instance.
957
965
958 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
966 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
959 or ``localrepo.makelocalrepository()`` for obtaining a new repository
967 or ``localrepo.makelocalrepository()`` for obtaining a new repository
960 object.
968 object.
961
969
962 Arguments:
970 Arguments:
963
971
964 baseui
972 baseui
965 ``ui.ui`` instance that ``ui`` argument was based off of.
973 ``ui.ui`` instance that ``ui`` argument was based off of.
966
974
967 ui
975 ui
968 ``ui.ui`` instance for use by the repository.
976 ``ui.ui`` instance for use by the repository.
969
977
970 origroot
978 origroot
971 ``bytes`` path to working directory root of this repository.
979 ``bytes`` path to working directory root of this repository.
972
980
973 wdirvfs
981 wdirvfs
974 ``vfs.vfs`` rooted at the working directory.
982 ``vfs.vfs`` rooted at the working directory.
975
983
976 hgvfs
984 hgvfs
977 ``vfs.vfs`` rooted at .hg/
985 ``vfs.vfs`` rooted at .hg/
978
986
979 requirements
987 requirements
980 ``set`` of bytestrings representing repository opening requirements.
988 ``set`` of bytestrings representing repository opening requirements.
981
989
982 supportedrequirements
990 supportedrequirements
983 ``set`` of bytestrings representing repository requirements that we
991 ``set`` of bytestrings representing repository requirements that we
984 know how to open. May be a superset of ``requirements``.
992 know how to open. May be a superset of ``requirements``.
985
993
986 sharedpath
994 sharedpath
987 ``bytes`` Defining path to storage base directory. Points to a
995 ``bytes`` Defining path to storage base directory. Points to a
988 ``.hg/`` directory somewhere.
996 ``.hg/`` directory somewhere.
989
997
990 store
998 store
991 ``store.basicstore`` (or derived) instance providing access to
999 ``store.basicstore`` (or derived) instance providing access to
992 versioned storage.
1000 versioned storage.
993
1001
994 cachevfs
1002 cachevfs
995 ``vfs.vfs`` used for cache files.
1003 ``vfs.vfs`` used for cache files.
996
1004
997 wcachevfs
1005 wcachevfs
998 ``vfs.vfs`` used for cache files related to the working copy.
1006 ``vfs.vfs`` used for cache files related to the working copy.
999
1007
1000 features
1008 features
1001 ``set`` of bytestrings defining features/capabilities of this
1009 ``set`` of bytestrings defining features/capabilities of this
1002 instance.
1010 instance.
1003
1011
1004 intents
1012 intents
1005 ``set`` of system strings indicating what this repo will be used
1013 ``set`` of system strings indicating what this repo will be used
1006 for.
1014 for.
1007 """
1015 """
1008 self.baseui = baseui
1016 self.baseui = baseui
1009 self.ui = ui
1017 self.ui = ui
1010 self.origroot = origroot
1018 self.origroot = origroot
1011 # vfs rooted at working directory.
1019 # vfs rooted at working directory.
1012 self.wvfs = wdirvfs
1020 self.wvfs = wdirvfs
1013 self.root = wdirvfs.base
1021 self.root = wdirvfs.base
1014 # vfs rooted at .hg/. Used to access most non-store paths.
1022 # vfs rooted at .hg/. Used to access most non-store paths.
1015 self.vfs = hgvfs
1023 self.vfs = hgvfs
1016 self.path = hgvfs.base
1024 self.path = hgvfs.base
1017 self.requirements = requirements
1025 self.requirements = requirements
1018 self.supported = supportedrequirements
1026 self.supported = supportedrequirements
1019 self.sharedpath = sharedpath
1027 self.sharedpath = sharedpath
1020 self.store = store
1028 self.store = store
1021 self.cachevfs = cachevfs
1029 self.cachevfs = cachevfs
1022 self.wcachevfs = wcachevfs
1030 self.wcachevfs = wcachevfs
1023 self.features = features
1031 self.features = features
1024
1032
1025 self.filtername = None
1033 self.filtername = None
1026
1034
1027 if (self.ui.configbool('devel', 'all-warnings') or
1035 if (self.ui.configbool('devel', 'all-warnings') or
1028 self.ui.configbool('devel', 'check-locks')):
1036 self.ui.configbool('devel', 'check-locks')):
1029 self.vfs.audit = self._getvfsward(self.vfs.audit)
1037 self.vfs.audit = self._getvfsward(self.vfs.audit)
1030 # A list of callbacks to shape the phase if no data were found.
1038 # A list of callbacks to shape the phase if no data were found.
1031 # Callbacks are in the form: func(repo, roots) --> processed root.
1039 # Callbacks are in the form: func(repo, roots) --> processed root.
1032 # This list is to be filled by extensions during repo setup
1040 # This list is to be filled by extensions during repo setup
1033 self._phasedefaults = []
1041 self._phasedefaults = []
1034
1042
1035 color.setup(self.ui)
1043 color.setup(self.ui)
1036
1044
1037 self.spath = self.store.path
1045 self.spath = self.store.path
1038 self.svfs = self.store.vfs
1046 self.svfs = self.store.vfs
1039 self.sjoin = self.store.join
1047 self.sjoin = self.store.join
1040 if (self.ui.configbool('devel', 'all-warnings') or
1048 if (self.ui.configbool('devel', 'all-warnings') or
1041 self.ui.configbool('devel', 'check-locks')):
1049 self.ui.configbool('devel', 'check-locks')):
1042 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1050 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
1043 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1051 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1044 else: # standard vfs
1052 else: # standard vfs
1045 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1053 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1046
1054
1047 self._dirstatevalidatewarned = False
1055 self._dirstatevalidatewarned = False
1048
1056
1049 self._branchcaches = branchmap.BranchMapCache()
1057 self._branchcaches = branchmap.BranchMapCache()
1050 self._revbranchcache = None
1058 self._revbranchcache = None
1051 self._filterpats = {}
1059 self._filterpats = {}
1052 self._datafilters = {}
1060 self._datafilters = {}
1053 self._transref = self._lockref = self._wlockref = None
1061 self._transref = self._lockref = self._wlockref = None
1054
1062
1055 # A cache for various files under .hg/ that tracks file changes,
1063 # A cache for various files under .hg/ that tracks file changes,
1056 # (used by the filecache decorator)
1064 # (used by the filecache decorator)
1057 #
1065 #
1058 # Maps a property name to its util.filecacheentry
1066 # Maps a property name to its util.filecacheentry
1059 self._filecache = {}
1067 self._filecache = {}
1060
1068
1061 # hold sets of revisions to be filtered
1069 # hold sets of revisions to be filtered
1062 # should be cleared when something might have changed the filter value:
1070 # should be cleared when something might have changed the filter value:
1063 # - new changesets,
1071 # - new changesets,
1064 # - phase change,
1072 # - phase change,
1065 # - new obsolescence marker,
1073 # - new obsolescence marker,
1066 # - working directory parent change,
1074 # - working directory parent change,
1067 # - bookmark changes
1075 # - bookmark changes
1068 self.filteredrevcache = {}
1076 self.filteredrevcache = {}
1069
1077
1070 # post-dirstate-status hooks
1078 # post-dirstate-status hooks
1071 self._postdsstatus = []
1079 self._postdsstatus = []
1072
1080
1073 # generic mapping between names and nodes
1081 # generic mapping between names and nodes
1074 self.names = namespaces.namespaces()
1082 self.names = namespaces.namespaces()
1075
1083
1076 # Key to signature value.
1084 # Key to signature value.
1077 self._sparsesignaturecache = {}
1085 self._sparsesignaturecache = {}
1078 # Signature to cached matcher instance.
1086 # Signature to cached matcher instance.
1079 self._sparsematchercache = {}
1087 self._sparsematchercache = {}
1080
1088
1081 self._extrafilterid = repoview.extrafilter(ui)
1089 self._extrafilterid = repoview.extrafilter(ui)
1082
1090
1083 def _getvfsward(self, origfunc):
1091 def _getvfsward(self, origfunc):
1084 """build a ward for self.vfs"""
1092 """build a ward for self.vfs"""
1085 rref = weakref.ref(self)
1093 rref = weakref.ref(self)
1086 def checkvfs(path, mode=None):
1094 def checkvfs(path, mode=None):
1087 ret = origfunc(path, mode=mode)
1095 ret = origfunc(path, mode=mode)
1088 repo = rref()
1096 repo = rref()
1089 if (repo is None
1097 if (repo is None
1090 or not util.safehasattr(repo, '_wlockref')
1098 or not util.safehasattr(repo, '_wlockref')
1091 or not util.safehasattr(repo, '_lockref')):
1099 or not util.safehasattr(repo, '_lockref')):
1092 return
1100 return
1093 if mode in (None, 'r', 'rb'):
1101 if mode in (None, 'r', 'rb'):
1094 return
1102 return
1095 if path.startswith(repo.path):
1103 if path.startswith(repo.path):
1096 # truncate name relative to the repository (.hg)
1104 # truncate name relative to the repository (.hg)
1097 path = path[len(repo.path) + 1:]
1105 path = path[len(repo.path) + 1:]
1098 if path.startswith('cache/'):
1106 if path.startswith('cache/'):
1099 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1107 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1100 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1108 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1101 if path.startswith('journal.') or path.startswith('undo.'):
1109 if path.startswith('journal.') or path.startswith('undo.'):
1102 # journal is covered by 'lock'
1110 # journal is covered by 'lock'
1103 if repo._currentlock(repo._lockref) is None:
1111 if repo._currentlock(repo._lockref) is None:
1104 repo.ui.develwarn('write with no lock: "%s"' % path,
1112 repo.ui.develwarn('write with no lock: "%s"' % path,
1105 stacklevel=3, config='check-locks')
1113 stacklevel=3, config='check-locks')
1106 elif repo._currentlock(repo._wlockref) is None:
1114 elif repo._currentlock(repo._wlockref) is None:
1107 # rest of vfs files are covered by 'wlock'
1115 # rest of vfs files are covered by 'wlock'
1108 #
1116 #
1109 # exclude special files
1117 # exclude special files
1110 for prefix in self._wlockfreeprefix:
1118 for prefix in self._wlockfreeprefix:
1111 if path.startswith(prefix):
1119 if path.startswith(prefix):
1112 return
1120 return
1113 repo.ui.develwarn('write with no wlock: "%s"' % path,
1121 repo.ui.develwarn('write with no wlock: "%s"' % path,
1114 stacklevel=3, config='check-locks')
1122 stacklevel=3, config='check-locks')
1115 return ret
1123 return ret
1116 return checkvfs
1124 return checkvfs
1117
1125
1118 def _getsvfsward(self, origfunc):
1126 def _getsvfsward(self, origfunc):
1119 """build a ward for self.svfs"""
1127 """build a ward for self.svfs"""
1120 rref = weakref.ref(self)
1128 rref = weakref.ref(self)
1121 def checksvfs(path, mode=None):
1129 def checksvfs(path, mode=None):
1122 ret = origfunc(path, mode=mode)
1130 ret = origfunc(path, mode=mode)
1123 repo = rref()
1131 repo = rref()
1124 if repo is None or not util.safehasattr(repo, '_lockref'):
1132 if repo is None or not util.safehasattr(repo, '_lockref'):
1125 return
1133 return
1126 if mode in (None, 'r', 'rb'):
1134 if mode in (None, 'r', 'rb'):
1127 return
1135 return
1128 if path.startswith(repo.sharedpath):
1136 if path.startswith(repo.sharedpath):
1129 # truncate name relative to the repository (.hg)
1137 # truncate name relative to the repository (.hg)
1130 path = path[len(repo.sharedpath) + 1:]
1138 path = path[len(repo.sharedpath) + 1:]
1131 if repo._currentlock(repo._lockref) is None:
1139 if repo._currentlock(repo._lockref) is None:
1132 repo.ui.develwarn('write with no lock: "%s"' % path,
1140 repo.ui.develwarn('write with no lock: "%s"' % path,
1133 stacklevel=4)
1141 stacklevel=4)
1134 return ret
1142 return ret
1135 return checksvfs
1143 return checksvfs
1136
1144
1137 def close(self):
1145 def close(self):
1138 self._writecaches()
1146 self._writecaches()
1139
1147
1140 def _writecaches(self):
1148 def _writecaches(self):
1141 if self._revbranchcache:
1149 if self._revbranchcache:
1142 self._revbranchcache.write()
1150 self._revbranchcache.write()
1143
1151
1144 def _restrictcapabilities(self, caps):
1152 def _restrictcapabilities(self, caps):
1145 if self.ui.configbool('experimental', 'bundle2-advertise'):
1153 if self.ui.configbool('experimental', 'bundle2-advertise'):
1146 caps = set(caps)
1154 caps = set(caps)
1147 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1155 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1148 role='client'))
1156 role='client'))
1149 caps.add('bundle2=' + urlreq.quote(capsblob))
1157 caps.add('bundle2=' + urlreq.quote(capsblob))
1150 return caps
1158 return caps
1151
1159
1152 def _writerequirements(self):
1160 def _writerequirements(self):
1153 scmutil.writerequires(self.vfs, self.requirements)
1161 scmutil.writerequires(self.vfs, self.requirements)
1154
1162
1155 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1163 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1156 # self -> auditor -> self._checknested -> self
1164 # self -> auditor -> self._checknested -> self
1157
1165
1158 @property
1166 @property
1159 def auditor(self):
1167 def auditor(self):
1160 # This is only used by context.workingctx.match in order to
1168 # This is only used by context.workingctx.match in order to
1161 # detect files in subrepos.
1169 # detect files in subrepos.
1162 return pathutil.pathauditor(self.root, callback=self._checknested)
1170 return pathutil.pathauditor(self.root, callback=self._checknested)
1163
1171
1164 @property
1172 @property
1165 def nofsauditor(self):
1173 def nofsauditor(self):
1166 # This is only used by context.basectx.match in order to detect
1174 # This is only used by context.basectx.match in order to detect
1167 # files in subrepos.
1175 # files in subrepos.
1168 return pathutil.pathauditor(self.root, callback=self._checknested,
1176 return pathutil.pathauditor(self.root, callback=self._checknested,
1169 realfs=False, cached=True)
1177 realfs=False, cached=True)
1170
1178
1171 def _checknested(self, path):
1179 def _checknested(self, path):
1172 """Determine if path is a legal nested repository."""
1180 """Determine if path is a legal nested repository."""
1173 if not path.startswith(self.root):
1181 if not path.startswith(self.root):
1174 return False
1182 return False
1175 subpath = path[len(self.root) + 1:]
1183 subpath = path[len(self.root) + 1:]
1176 normsubpath = util.pconvert(subpath)
1184 normsubpath = util.pconvert(subpath)
1177
1185
1178 # XXX: Checking against the current working copy is wrong in
1186 # XXX: Checking against the current working copy is wrong in
1179 # the sense that it can reject things like
1187 # the sense that it can reject things like
1180 #
1188 #
1181 # $ hg cat -r 10 sub/x.txt
1189 # $ hg cat -r 10 sub/x.txt
1182 #
1190 #
1183 # if sub/ is no longer a subrepository in the working copy
1191 # if sub/ is no longer a subrepository in the working copy
1184 # parent revision.
1192 # parent revision.
1185 #
1193 #
1186 # However, it can of course also allow things that would have
1194 # However, it can of course also allow things that would have
1187 # been rejected before, such as the above cat command if sub/
1195 # been rejected before, such as the above cat command if sub/
1188 # is a subrepository now, but was a normal directory before.
1196 # is a subrepository now, but was a normal directory before.
1189 # The old path auditor would have rejected by mistake since it
1197 # The old path auditor would have rejected by mistake since it
1190 # panics when it sees sub/.hg/.
1198 # panics when it sees sub/.hg/.
1191 #
1199 #
1192 # All in all, checking against the working copy seems sensible
1200 # All in all, checking against the working copy seems sensible
1193 # since we want to prevent access to nested repositories on
1201 # since we want to prevent access to nested repositories on
1194 # the filesystem *now*.
1202 # the filesystem *now*.
1195 ctx = self[None]
1203 ctx = self[None]
1196 parts = util.splitpath(subpath)
1204 parts = util.splitpath(subpath)
1197 while parts:
1205 while parts:
1198 prefix = '/'.join(parts)
1206 prefix = '/'.join(parts)
1199 if prefix in ctx.substate:
1207 if prefix in ctx.substate:
1200 if prefix == normsubpath:
1208 if prefix == normsubpath:
1201 return True
1209 return True
1202 else:
1210 else:
1203 sub = ctx.sub(prefix)
1211 sub = ctx.sub(prefix)
1204 return sub.checknested(subpath[len(prefix) + 1:])
1212 return sub.checknested(subpath[len(prefix) + 1:])
1205 else:
1213 else:
1206 parts.pop()
1214 parts.pop()
1207 return False
1215 return False
1208
1216
1209 def peer(self):
1217 def peer(self):
1210 return localpeer(self) # not cached to avoid reference cycle
1218 return localpeer(self) # not cached to avoid reference cycle
1211
1219
1212 def unfiltered(self):
1220 def unfiltered(self):
1213 """Return unfiltered version of the repository
1221 """Return unfiltered version of the repository
1214
1222
1215 Intended to be overwritten by filtered repo."""
1223 Intended to be overwritten by filtered repo."""
1216 return self
1224 return self
1217
1225
1218 def filtered(self, name, visibilityexceptions=None):
1226 def filtered(self, name, visibilityexceptions=None):
1219 """Return a filtered version of a repository
1227 """Return a filtered version of a repository
1220
1228
1221 The `name` parameter is the identifier of the requested view. This
1229 The `name` parameter is the identifier of the requested view. This
1222 will return a repoview object set "exactly" to the specified view.
1230 will return a repoview object set "exactly" to the specified view.
1223
1231
1224 This function does not apply recursive filtering to a repository. For
1232 This function does not apply recursive filtering to a repository. For
1225 example calling `repo.filtered("served")` will return a repoview using
1233 example calling `repo.filtered("served")` will return a repoview using
1226 the "served" view, regardless of the initial view used by `repo`.
1234 the "served" view, regardless of the initial view used by `repo`.
1227
1235
1228 In other words, there is always only one level of `repoview` "filtering".
1236 In other words, there is always only one level of `repoview` "filtering".
1229 """
1237 """
1230 if self._extrafilterid is not None and '%' not in name:
1238 if self._extrafilterid is not None and '%' not in name:
1231 name = name + '%' + self._extrafilterid
1239 name = name + '%' + self._extrafilterid
1232
1240
1233 cls = repoview.newtype(self.unfiltered().__class__)
1241 cls = repoview.newtype(self.unfiltered().__class__)
1234 return cls(self, name, visibilityexceptions)
1242 return cls(self, name, visibilityexceptions)
1235
1243
1236 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1244 @mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
1237 ('bookmarks', ''), ('00changelog.i', ''))
1245 ('bookmarks', ''), ('00changelog.i', ''))
1238 def _bookmarks(self):
1246 def _bookmarks(self):
1239 # Since the multiple files involved in the transaction cannot be
1247 # Since the multiple files involved in the transaction cannot be
1240 # written atomically (with the current repository format), there is a race
1248 # written atomically (with the current repository format), there is a race
1241 # condition here.
1249 # condition here.
1242 #
1250 #
1243 # 1) changelog content A is read
1251 # 1) changelog content A is read
1244 # 2) outside transaction update changelog to content B
1252 # 2) outside transaction update changelog to content B
1245 # 3) outside transaction update bookmark file referring to content B
1253 # 3) outside transaction update bookmark file referring to content B
1246 # 4) bookmarks file content is read and filtered against changelog-A
1254 # 4) bookmarks file content is read and filtered against changelog-A
1247 #
1255 #
1248 # When this happens, bookmarks against nodes missing from A are dropped.
1256 # When this happens, bookmarks against nodes missing from A are dropped.
1249 #
1257 #
1250 # Having this happen during a read is not great, but it becomes worse
1258 # Having this happen during a read is not great, but it becomes worse
1251 # when it happens during a write, because the bookmarks to the "unknown"
1259 # when it happens during a write, because the bookmarks to the "unknown"
1252 # nodes will be dropped for good. However, writes happen within locks.
1260 # nodes will be dropped for good. However, writes happen within locks.
1253 # This locking makes it possible to have a race-free consistent read.
1261 # This locking makes it possible to have a race-free consistent read.
1254 # For this purpose, data read from disk before locking are
1262 # For this purpose, data read from disk before locking are
1255 # "invalidated" right after the locks are taken. These invalidations are
1263 # "invalidated" right after the locks are taken. These invalidations are
1256 # "light": the `filecache` mechanism keeps the data in memory and will
1264 # "light": the `filecache` mechanism keeps the data in memory and will
1257 # reuse it if the underlying files did not change. Not parsing the
1265 # reuse it if the underlying files did not change. Not parsing the
1258 # same data multiple times helps performance.
1266 # same data multiple times helps performance.
1259 #
1267 #
1260 # Unfortunately, in the case described above, the files tracked by the
1268 # Unfortunately, in the case described above, the files tracked by the
1261 # bookmarks file cache might not have changed, but the in-memory
1269 # bookmarks file cache might not have changed, but the in-memory
1262 # content is still "wrong" because we used an older changelog content
1270 # content is still "wrong" because we used an older changelog content
1263 # to process the on-disk data. So after locking, the changelog would be
1271 # to process the on-disk data. So after locking, the changelog would be
1264 # refreshed but `_bookmarks` would be preserved.
1272 # refreshed but `_bookmarks` would be preserved.
1265 # Adding `00changelog.i` to the list of tracked files is not
1273 # Adding `00changelog.i` to the list of tracked files is not
1266 # enough, because at the time we build the content for `_bookmarks` in
1274 # enough, because at the time we build the content for `_bookmarks` in
1267 # (4), the changelog file has already diverged from the content used
1275 # (4), the changelog file has already diverged from the content used
1268 # for loading `changelog` in (1)
1276 # for loading `changelog` in (1)
1269 #
1277 #
1270 # To prevent the issue, we force the changelog to be explicitly
1278 # To prevent the issue, we force the changelog to be explicitly
1271 # reloaded while computing `_bookmarks`. The data race can still happen
1279 # reloaded while computing `_bookmarks`. The data race can still happen
1272 # without the lock (with a narrower window), but it would no longer go
1280 # without the lock (with a narrower window), but it would no longer go
1273 # undetected during the lock time refresh.
1281 # undetected during the lock time refresh.
1274 #
1282 #
1275 # The new schedule is as follows:
1283 # The new schedule is as follows:
1276 #
1284 #
1277 # 1) filecache logic detects that `_bookmarks` needs to be computed
1285 # 1) filecache logic detects that `_bookmarks` needs to be computed
1278 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1286 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1279 # 3) We force `changelog` filecache to be tested
1287 # 3) We force `changelog` filecache to be tested
1280 # 4) cachestat for `changelog` are captured (for changelog)
1288 # 4) cachestat for `changelog` are captured (for changelog)
1281 # 5) `_bookmarks` is computed and cached
1289 # 5) `_bookmarks` is computed and cached
1282 #
1290 #
1283 # The step in (3) ensures we have a changelog at least as recent as the
1291 # The step in (3) ensures we have a changelog at least as recent as the
1284 # cache stat computed in (1). As a result, at locking time:
1292 # cache stat computed in (1). As a result, at locking time:
1285 # * if the changelog did not change since (1) -> we can reuse the data
1293 # * if the changelog did not change since (1) -> we can reuse the data
1286 # * otherwise -> the bookmarks get refreshed.
1294 # * otherwise -> the bookmarks get refreshed.
1287 self._refreshchangelog()
1295 self._refreshchangelog()
1288 return bookmarks.bmstore(self)
1296 return bookmarks.bmstore(self)
1289
1297
1290 def _refreshchangelog(self):
1298 def _refreshchangelog(self):
1291 """make sure the in memory changelog match the on-disk one"""
1299 """make sure the in memory changelog match the on-disk one"""
1292 if ('changelog' in vars(self) and self.currenttransaction() is None):
1300 if ('changelog' in vars(self) and self.currenttransaction() is None):
1293 del self.changelog
1301 del self.changelog
1294
1302
    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed with the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

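    # Illustrative sketch (hypothetical path): intersecting a user-supplied
    # matcher with the narrowspec while keeping explicitly named files so
    # they can be warned about instead of silently dropped.
    #
    #   m = matchmod.match(repo.root, '', ['path:outside/narrow.txt'])
    #   nm = repo.narrowmatch(m, includeexact=True)
    #   nm('outside/narrow.txt')  # True even outside the narrowspec
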
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

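    # For reference, the changeid forms accepted above (illustrative only):
    #
    #   repo[None]       # working directory context
    #   repo[3]          # integer revision number
    #   repo['tip']      # the symbolic names 'tip', 'null' and '.'
    #   repo[node]       # a 20-byte binary node
    #   repo[hexnode]    # a 40-character hex node
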
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

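    # Example (sketch): the %-formatting escapes arguments safely, e.g. %d
    # for an int, %s for a string, %ld/%ln for lists of revisions/nodes.
    #
    #   for r in repo.revs('ancestors(%ld) and not public()', revs):
    #       ...
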
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

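    # Example (sketch): iterate changectx instances instead of bare
    # revision numbers.
    #
    #   for ctx in repo.set('draft()'):
    #       ui.write('%s\n' % ctx.hex())
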
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

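    # Example (sketch, with a hypothetical alias): expand user aliases while
    # overriding one of them locally.
    #
    #   revs = repo.anyrevs(['mine()'], user=True,
    #                       localalias={'mine': 'user("alice")'})
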
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

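    # The filter patterns above come from hgrc sections named after the
    # filter, e.g. (illustrative sketch; `cleanup-whitespace` is a
    # hypothetical shell command receiving the file data on stdin):
    #
    #   [encode]
    #   *.txt = cleanup-whitespace
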
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

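    # Example (sketch): the flags string picks the on-disk representation,
    # 'l' writes a symlink, 'x' marks the file executable, '' a plain file.
    #
    #   repo.wwrite('hook.sh', '#!/bin/sh\n', 'x')
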
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

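    # Note: transaction() below reuses a running transaction via tr.nest();
    # callers that must not open a new one can instead do (sketch):
    #
    #   tr = repo.currenttransaction()
    #   if tr is not None:
    #       ...  # piggy-back on the transaction in progress
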
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            r = repo.ui.configsuboptions('experimental',
                                         'single-head-per-branch')
            singlehead, singleheadsub = r
            if singlehead:
                accountclosed = singleheadsub.get("account-closed-heads", False)
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing the
                # transaction if tr.addfilegenerator (via dirstate.write
                # or so) wasn't invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

2092 def rollback(self, dryrun=False, force=False):
2100 def rollback(self, dryrun=False, force=False):
2093 wlock = lock = dsguard = None
2101 wlock = lock = dsguard = None
2094 try:
2102 try:
2095 wlock = self.wlock()
2103 wlock = self.wlock()
2096 lock = self.lock()
2104 lock = self.lock()
2097 if self.svfs.exists("undo"):
2105 if self.svfs.exists("undo"):
2098 dsguard = dirstateguard.dirstateguard(self, 'rollback')
2106 dsguard = dirstateguard.dirstateguard(self, 'rollback')
2099
2107
2100 return self._rollback(dryrun, force, dsguard)
2108 return self._rollback(dryrun, force, dsguard)
2101 else:
2109 else:
2102 self.ui.warn(_("no rollback information available\n"))
2110 self.ui.warn(_("no rollback information available\n"))
2103 return 1
2111 return 1
2104 finally:
2112 finally:
2105 release(dsguard, lock, wlock)
2113 release(dsguard, lock, wlock)
2106
2114
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks',
                                checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots',
                             checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent the dirstateguard from overwriting the already
            # restored dirstate
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

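    # A hypothetical extension could augment the cache-update logic by
    # wrapping _buildcacheupdater(); a sketch (names are illustrative):
    #
    #   def _buildcacheupdater(orig, self, newtransaction):
    #       updater = orig(self, newtransaction)
    #       def extupdater(tr):
    #           updater(tr)
    #           # warm extension-specific caches here
    #       return extupdater
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _buildcacheupdater)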
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction was closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

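    # Invalidation summary for the methods below: invalidatecaches() drops
    # derived in-memory data (tags, branch caches, volatile revsets),
    # invalidatedirstate() drops only the cached dirstate, invalidate()
    # drops filecache-backed store data, and invalidateall() combines the
    # latter two.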
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

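    # Note: _lock() below honors ui.timeout / ui.timeout.warn only when
    # 'wait' is true; with wait=False the timeouts stay at 0 and a contended
    # lock request fails immediately instead of blocking.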
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

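    # Typical caller pattern, as a sketch (the transaction name is
    # illustrative -- compare commit() further below):
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-operation'):
    #           ...  # mutate the store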
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

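    # Lock inheritance: when HG_WLOCK_LOCKER is set in the environment (see
    # parentenvvar below), a child hg process may take over the parent's
    # wlock; _wlockchecktransaction() above refuses to hand the lock out
    # while a transaction is in progress.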
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
                    includecopymeta):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if ((fparent1 != nullid and
                     manifest1.flags(fname) != fctx.flags()) or
                    (fparent2 != nullid and
                     manifest2.flags(fname) != fctx.flags())):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname,
                                                     hex(cnode)))
                if includecopymeta:
                    meta["copy"] = cfname
                    meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

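    # _filecommit() above returns the filelog node to record in the new
    # manifest: either a reused existing node when the content is unchanged,
    # or the node of a freshly added filelog revision (with any copy source
    # recorded in 'meta').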
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

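    # Usage sketch for commit() below (illustrative only):
    #
    #   node = repo.commit(text='fix the frobnicator', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')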
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook():
                # hack for commands that use a temporary commit (e.g. histedit)
                # where the temporary commit was stripped before the hook runs
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=hex(ret), parent1=hookp1,
                              parent2=hookp2)
            self._afterlock(commithook)
            return ret

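    # experimental.copies.write-to (read at the top of commitctx below)
    # selects where copy metadata is stored: filelog metadata only (the
    # historical behavior, for any value other than the two that follow),
    # 'changeset-only', or 'compatibility' which writes to both places.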
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        writechangesetcopy = (writecopiesto in
                              ('changeset-only', 'compatibility'))
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    @util.cachefunc
                    def mas():
                        p1n = p1.node()
                        p2n = p2.node()
                        cahs = self.changelog.commonancestorsheads(p1n, p2n)
                        if not cahs:
                            cahs = [nullrev]
                        return [self[r].manifest() for r in cahs]
                    def deletionfromparent(f):
                        # When a file is removed relative to p1 in a merge,
                        # this function determines whether the absence is due
                        # to a deletion from a parent, or whether the merge
                        # commit itself deletes the file. We decide this by
                        # doing a simplified three way merge of the manifest
                        # entry for the file. There are two ways we decide the
                        # merge itself didn't delete a file:
                        # - neither parent (nor the merge) contains the file
                        # - exactly one parent contains the file, and that
                        #   parent has the same filelog entry as the merge
                        #   ancestor (or all of them if there are two). In
                        #   other words, that parent left the file unchanged
                        #   while the other one deleted it.
                        # One way to think about this is that deleting a file
                        # is similar to emptying it, so the list of changed
                        # files should be similar either way. The computation
                        # described above is not done directly in _filecommit
                        # when creating the list of changed files, however
                        # it does something very similar by comparing filelog
                        # nodes.
                        if f in m1:
                            return (f not in m2
                                    and all(f in ma and ma.find(f) == m1.find(f)
                                            for ma in mas()))
                        elif f in m2:
                            return all(f in ma and ma.find(f) == m2.find(f)
                                       for ma in mas())
                        else:
                            return True
                    removed = [f for f in removed if not deletionfromparent(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied
                    # at other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())

                    if writechangesetcopy:
                        filesadded = [f for f in changed
                                      if not (f in m1 or f in m2)]
                        filesremoved = removed
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies, filesadded,
                                   filesremoved)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in the proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

2869 @unfilteredmethod
2877 @unfilteredmethod
2870 def destroyed(self):
2878 def destroyed(self):
2871 '''Inform the repository that nodes have been destroyed.
2879 '''Inform the repository that nodes have been destroyed.
2872 Intended for use by strip and rollback, so there's a common
2880 Intended for use by strip and rollback, so there's a common
2873 place for anything that has to be done after destroying history.
2881 place for anything that has to be done after destroying history.
2874 '''
2882 '''
2875 # When one tries to:
2883 # When one tries to:
2876 # 1) destroy nodes thus calling this method (e.g. strip)
2884 # 1) destroy nodes thus calling this method (e.g. strip)
2877 # 2) use phasecache somewhere (e.g. commit)
2885 # 2) use phasecache somewhere (e.g. commit)
2878 #
2886 #
2879 # then 2) will fail because the phasecache contains nodes that were
2887 # then 2) will fail because the phasecache contains nodes that were
2880 # removed. We can either remove phasecache from the filecache,
2888 # removed. We can either remove phasecache from the filecache,
2881 # causing it to reload next time it is accessed, or simply filter
2889 # causing it to reload next time it is accessed, or simply filter
2882 # the removed nodes now and write the updated cache.
2890 # the removed nodes now and write the updated cache.
2883 self._phasecache.filterunknown(self)
2891 self._phasecache.filterunknown(self)
2884 self._phasecache.write()
2892 self._phasecache.write()
2885
2893
2886 # refresh all repository caches
2894 # refresh all repository caches
2887 self.updatecaches()
2895 self.updatecaches()
2888
2896
2889 # Ensure the persistent tag cache is updated. Doing it now
2897 # Ensure the persistent tag cache is updated. Doing it now
2890 # means that the tag cache only has to worry about destroyed
2898 # means that the tag cache only has to worry about destroyed
2891 # heads immediately after a strip/rollback. That in turn
2899 # heads immediately after a strip/rollback. That in turn
2892 # guarantees that "cachetip == currenttip" (comparing both rev
2900 # guarantees that "cachetip == currenttip" (comparing both rev
2893 # and node) always means no nodes have been added or destroyed.
2901 # and node) always means no nodes have been added or destroyed.
2894
2902
2895 # XXX this is suboptimal when qrefresh'ing: we strip the current
2903 # XXX this is suboptimal when qrefresh'ing: we strip the current
2896 # head, refresh the tag cache, then immediately add a new head.
2904 # head, refresh the tag cache, then immediately add a new head.
2897 # But I think doing it this way is necessary for the "instant
2905 # But I think doing it this way is necessary for the "instant
2898 # tag cache retrieval" case to work.
2906 # tag cache retrieval" case to work.
2899 self.invalidate()
2907 self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
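        # Illustration: starting at ``top`` and following first parents, a
        # node is recorded whenever the step counter ``i`` equals ``f``
        # (1, 2, 4, 8, ...), so each returned list is an exponentially
        # spaced sample of the chain down to ``bottom`` -- cheap for the
        # old wire protocol to use when bisecting long histories.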

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks that are passed a pushop
        (with repo, remote, and outgoing attributes) and are called before
        pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
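        # Common built-in pushkey namespaces are 'bookmarks', 'phases',
        # 'namespaces' and 'obsolete'; extensions can register additional
        # ones through the pushkey module.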

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
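        # ``self.vfs`` is rooted at the ``.hg`` directory, so the message
        # lands in ``.hg/last-message.txt``; the return value is that path
        # made relative for display.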

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
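# Example: undoname('store/journal.phaseroots') -> 'store/undo.phaseroots';
# only the first 'journal' in the basename is rewritten, turning a
# transaction journal name into the matching undo file name.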

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == 'zstd':
        requirements.add('revlog-compression-zstd')
    elif compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-side-data
    if ui.configbool('format', 'use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    if ui.configbool('format', 'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements
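# Illustration (typical values, not exhaustive): with stock defaults this
# returns {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta',
# 'sparse-revlog'}; enabling the experimental format.use-side-data option
# additionally adds SIDEDATA_REQUIREMENT.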

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
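# Example (option names hypothetical): filterknowncreateopts(ui,
# {'lfs': True, 'frobnicate': 1}) returns {'frobnicate': 1}, which makes
# createrepository() below abort since no loaded code claims that option.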

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
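# Usage sketch (path hypothetical):
#   createrepository(ui, b'/srv/repos/project', createopts={'lfs': True})
# creates /srv/repos/project/.hg with 'lfs' recorded in its requires file.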

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
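# Sketch of the resulting behaviour:
#   poisonrepository(repo)
#   repo.close()       # still permitted, does nothing
#   repo.changelog     # raises error.ProgrammingError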
@@ -1,2660 +1,2665 b''
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    flagutil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the names to silence pyflakes warnings;
# we need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod(r'parsers')
rustancestor = policy.importrust(r'ancestor')
rustdagop = policy.importrust(r'dagop')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False, {}

def ellipsiswriteprocessor(rl, text, sidedata):
    return text, False

def ellipsisrawprocessor(rl, text):
    return False

ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)
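# Illustration: an index entry packs a 48-bit byte offset and a 16-bit
# flag field into one integer, and the helpers above invert each other:
#   offset_type(4096, 0) == 4096 << 16
#   getoffset(offset_type(4096, 0)) == 4096
#   gettype(offset_type(4096, 0)) == 0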

@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack
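# Size check: struct.calcsize(">4l20s20s20s") == 76, i.e. four 4-byte
# integers followed by three 20-byte nodeids per v0 entry.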

class revlogoldindex(list):
    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)

class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(_('index entry flags need revlog '
                                      'version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack
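# Size check: struct.calcsize(">Qiiiiii20s12x") == 64; the 20-byte binary
# nodeid is zero-padded towards the 32 bytes documented above. Note that
# packentry() below overlays the first 4 bytes of entry 0 with the version
# header, which works because rev 0 always starts at data offset 0.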

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """

    _flagserrorclass = error.RevlogError

    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False,
                 upperboundcomp=None):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = self.opener.options

        if 'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif 'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if 'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif 'revlogv0' in self.opener.options:
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'deltabothparents' in opts:
            self._deltabothparents = opts['deltabothparents']
        self._lazydelta = bool(opts.get('lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        if 'compengine' in opts:
            self._compengine = opts['compengine']
        if 'zlib.level' in opts:
            self._compengineopts['zlib.level'] = opts['zlib.level']
        if 'zstd.level' in opts:
            self._compengineopts['zstd.level'] = opts['zstd.level']
        if 'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts['maxdeltachainspan']
        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
            mmapindexthreshold = opts['mmapindexthreshold']
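        # New with the side-data requirement introduced by this change:
        # the 'side-data' opener option is presumably set by the repository
        # when that requirement is present, and hassidedata marks this
        # revlog as allowed to carry side-data alongside revision text.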
391 self.hassidedata = bool(opts.get('side-data', False))
391 self._sparserevlog = bool(opts.get('sparse-revlog', False))
392 self._sparserevlog = bool(opts.get('sparse-revlog', False))
392 withsparseread = bool(opts.get('with-sparse-read', False))
393 withsparseread = bool(opts.get('with-sparse-read', False))
393 # sparse-revlog forces sparse-read
394 # sparse-revlog forces sparse-read
394 self._withsparseread = self._sparserevlog or withsparseread
395 self._withsparseread = self._sparserevlog or withsparseread
395 if 'sparse-read-density-threshold' in opts:
396 if 'sparse-read-density-threshold' in opts:
396 self._srdensitythreshold = opts['sparse-read-density-threshold']
397 self._srdensitythreshold = opts['sparse-read-density-threshold']
397 if 'sparse-read-min-gap-size' in opts:
398 if 'sparse-read-min-gap-size' in opts:
398 self._srmingapsize = opts['sparse-read-min-gap-size']
399 self._srmingapsize = opts['sparse-read-min-gap-size']
399 if opts.get('enableellipsis'):
400 if opts.get('enableellipsis'):
400 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
401 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
401
402
402 # revlog v0 doesn't have flag processors
403 # revlog v0 doesn't have flag processors
403 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
404 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
404 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
405 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
405
406
        if self._chunkcachesize <= 0:
            raise error.RevlogError(_('revlog chunk cache size %r is not '
                                      'greater than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(_('revlog chunk cache size %r is not a '
                                      'power of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

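        # Illustrative decoding: the 4-byte header packs the format number in
        # the low 16 bits and feature flags in the high 16 bits, so e.g. an
        # inline general-delta v1 revlog stores 0x00030001, giving fmt == 1
        # and flags == FLAG_INLINE_DATA | FLAG_GENERALDELTA.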
        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
                                    (fmt, self.indexfile))
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise error.RevlogError(_("index %s is corrupted") %
                                    self.indexfile)
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    def _indexfp(self, mode='r'):
        """file object for the revlog's index file"""
        args = {r'mode': mode}
        if mode != 'r':
            args[r'checkambig'] = self._checkambig
        if mode == 'w':
            args[r'atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode='r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tip(self):
        return self.node(len(self.index) - 1)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index)
    def __iter__(self):
        return iter(pycompat.xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @util.propertycache
    def nodemap(self):
        if self.index:
            # populate mapping down to the initial node
            node0 = self.index[0][7] # get around changelog filtering
            self.rev(node0)
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents), making the delta incompatible.
        if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False
        return True

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            # If we are using the native C version, self.index, self.nodemap
            # and self._nodecache are all the same object.
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 1
            else:
                assert p < len(i)
            for r in pycompat.xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
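    # Concretely, an entry as produced by parseindex() is an 8-tuple:
    # (offset_and_flags, compressed_length, uncompressed_length, delta_base,
    #  link_rev, parent_1_rev, parent_2_rev, node) -- these field names are
    # descriptive only; the accessors below address the tuple by position.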
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, 'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
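        # Sketch of the approach: "has" lazily materializes ::common, while
        # the walk below pops heads' ancestors off a queue, collecting each
        # rev until it reaches something already known to be in ::common.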
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses a list of all of the revlog's heads."""
957 if roots is not None:
958 if roots is not None:
958 roots = list(roots)
959 roots = list(roots)
959 if not roots:
960 if not roots:
960 return nonodes
961 return nonodes
961 lowestrev = min([self.rev(n) for n in roots])
962 lowestrev = min([self.rev(n) for n in roots])
962 else:
963 else:
963 roots = [nullid] # Everybody's a descendant of nullid
964 roots = [nullid] # Everybody's a descendant of nullid
964 lowestrev = nullrev
965 lowestrev = nullrev
965 if (lowestrev == nullrev) and (heads is None):
966 if (lowestrev == nullrev) and (heads is None):
966 # We want _all_ the nodes!
967 # We want _all_ the nodes!
967 return ([self.node(r) for r in self], [nullid], list(self.heads()))
968 return ([self.node(r) for r in self], [nullid], list(self.heads()))
968 if heads is None:
969 if heads is None:
969 # All nodes are ancestors, so the latest ancestor is the last
970 # All nodes are ancestors, so the latest ancestor is the last
970 # node.
971 # node.
971 highestrev = len(self) - 1
972 highestrev = len(self) - 1
972 # Set ancestors to None to signal that every node is an ancestor.
973 # Set ancestors to None to signal that every node is an ancestor.
973 ancestors = None
974 ancestors = None
974 # Set heads to an empty dictionary for later discovery of heads
975 # Set heads to an empty dictionary for later discovery of heads
975 heads = {}
976 heads = {}
976 else:
977 else:
977 heads = list(heads)
978 heads = list(heads)
978 if not heads:
979 if not heads:
979 return nonodes
980 return nonodes
980 ancestors = set()
981 ancestors = set()
981 # Turn heads into a dictionary so we can remove 'fake' heads.
982 # Turn heads into a dictionary so we can remove 'fake' heads.
982 # Also, later we will be using it to filter out the heads we can't
983 # Also, later we will be using it to filter out the heads we can't
983 # find from roots.
984 # find from roots.
984 heads = dict.fromkeys(heads, False)
985 heads = dict.fromkeys(heads, False)
985 # Start at the top and keep marking parents until we're done.
986 # Start at the top and keep marking parents until we're done.
986 nodestotag = set(heads)
987 nodestotag = set(heads)
987 # Remember where the top was so we can use it as a limit later.
988 # Remember where the top was so we can use it as a limit later.
988 highestrev = max([self.rev(n) for n in nodestotag])
989 highestrev = max([self.rev(n) for n in nodestotag])
989 while nodestotag:
990 while nodestotag:
990 # grab a node to tag
991 # grab a node to tag
991 n = nodestotag.pop()
992 n = nodestotag.pop()
992 # Never tag nullid
993 # Never tag nullid
993 if n == nullid:
994 if n == nullid:
994 continue
995 continue
995 # A node's revision number represents its place in a
996 # A node's revision number represents its place in a
996 # topologically sorted list of nodes.
997 # topologically sorted list of nodes.
997 r = self.rev(n)
998 r = self.rev(n)
998 if r >= lowestrev:
999 if r >= lowestrev:
999 if n not in ancestors:
1000 if n not in ancestors:
1000 # If we are possibly a descendant of one of the roots
1001 # If we are possibly a descendant of one of the roots
1001 # and we haven't already been marked as an ancestor
1002 # and we haven't already been marked as an ancestor
1002 ancestors.add(n) # Mark as ancestor
1003 ancestors.add(n) # Mark as ancestor
1003 # Add non-nullid parents to list of nodes to tag.
1004 # Add non-nullid parents to list of nodes to tag.
1004 nodestotag.update([p for p in self.parents(n) if
1005 nodestotag.update([p for p in self.parents(n) if
1005 p != nullid])
1006 p != nullid])
1006 elif n in heads: # We've seen it before, is it a fake head?
1007 elif n in heads: # We've seen it before, is it a fake head?
1007 # So it is, real heads should not be the ancestors of
1008 # So it is, real heads should not be the ancestors of
1008 # any other heads.
1009 # any other heads.
1009 heads.pop(n)
1010 heads.pop(n)
1010 if not ancestors:
1011 if not ancestors:
1011 return nonodes
1012 return nonodes
1012 # Now that we have our set of ancestors, we want to remove any
1013 # Now that we have our set of ancestors, we want to remove any
1013 # roots that are not ancestors.
1014 # roots that are not ancestors.
1014
1015
1015 # If one of the roots was nullid, everything is included anyway.
1016 # If one of the roots was nullid, everything is included anyway.
1016 if lowestrev > nullrev:
1017 if lowestrev > nullrev:
1017 # But, since we weren't, let's recompute the lowest rev to not
1018 # But, since we weren't, let's recompute the lowest rev to not
1018 # include roots that aren't ancestors.
1019 # include roots that aren't ancestors.
1019
1020
1020 # Filter out roots that aren't ancestors of heads
1021 # Filter out roots that aren't ancestors of heads
1021 roots = [root for root in roots if root in ancestors]
1022 roots = [root for root in roots if root in ancestors]
1022 # Recompute the lowest revision
1023 # Recompute the lowest revision
1023 if roots:
1024 if roots:
1024 lowestrev = min([self.rev(root) for root in roots])
1025 lowestrev = min([self.rev(root) for root in roots])
1025 else:
1026 else:
1026 # No more roots? Return empty list
1027 # No more roots? Return empty list
1027 return nonodes
1028 return nonodes
1028 else:
1029 else:
1029 # We are descending from nullid, and don't need to care about
1030 # We are descending from nullid, and don't need to care about
1030 # any other roots.
1031 # any other roots.
1031 lowestrev = nullrev
1032 lowestrev = nullrev
1032 roots = [nullid]
1033 roots = [nullid]
1033 # Transform our roots list into a set.
1034 # Transform our roots list into a set.
1034 descendants = set(roots)
1035 descendants = set(roots)
1035 # Also, keep the original roots so we can filter out roots that aren't
1036 # Also, keep the original roots so we can filter out roots that aren't
1036 # 'real' roots (i.e. are descended from other roots).
1037 # 'real' roots (i.e. are descended from other roots).
1037 roots = descendants.copy()
1038 roots = descendants.copy()
1038 # Our topologically sorted list of output nodes.
1039 # Our topologically sorted list of output nodes.
1039 orderedout = []
1040 orderedout = []
1040 # Don't start at nullid since we don't want nullid in our output list,
1041 # Don't start at nullid since we don't want nullid in our output list,
1041 # and if nullid shows up in descendants, empty parents will look like
1042 # and if nullid shows up in descendants, empty parents will look like
1042 # they're descendants.
1043 # they're descendants.
1043 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1044 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1044 n = self.node(r)
1045 n = self.node(r)
1045 isdescendant = False
1046 isdescendant = False
1046 if lowestrev == nullrev: # Everybody is a descendant of nullid
1047 if lowestrev == nullrev: # Everybody is a descendant of nullid
1047 isdescendant = True
1048 isdescendant = True
1048 elif n in descendants:
1049 elif n in descendants:
1049 # n is already a descendant
1050 # n is already a descendant
1050 isdescendant = True
1051 isdescendant = True
1051 # This check only needs to be done here because all the roots
1052 # This check only needs to be done here because all the roots
1052 # will start being marked is descendants before the loop.
1053 # will start being marked is descendants before the loop.
1053 if n in roots:
1054 if n in roots:
1054 # If n was a root, check if it's a 'real' root.
1055 # If n was a root, check if it's a 'real' root.
1055 p = tuple(self.parents(n))
1056 p = tuple(self.parents(n))
1056 # If any of its parents are descendants, it's not a root.
1057 # If any of its parents are descendants, it's not a root.
1057 if (p[0] in descendants) or (p[1] in descendants):
1058 if (p[0] in descendants) or (p[1] in descendants):
1058 roots.remove(n)
1059 roots.remove(n)
1059 else:
1060 else:
1060 p = tuple(self.parents(n))
1061 p = tuple(self.parents(n))
1061 # A node is a descendant if either of its parents are
1062 # A node is a descendant if either of its parents are
1062 # descendants. (We seeded the dependents list with the roots
1063 # descendants. (We seeded the dependents list with the roots
1063 # up there, remember?)
1064 # up there, remember?)
1064 if (p[0] in descendants) or (p[1] in descendants):
1065 if (p[0] in descendants) or (p[1] in descendants):
1065 descendants.add(n)
1066 descendants.add(n)
1066 isdescendant = True
1067 isdescendant = True
1067 if isdescendant and ((ancestors is None) or (n in ancestors)):
1068 if isdescendant and ((ancestors is None) or (n in ancestors)):
1068 # Only include nodes that are both descendants and ancestors.
1069 # Only include nodes that are both descendants and ancestors.
1069 orderedout.append(n)
1070 orderedout.append(n)
1070 if (ancestors is not None) and (n in heads):
1071 if (ancestors is not None) and (n in heads):
1071 # We're trying to figure out which heads are reachable
1072 # We're trying to figure out which heads are reachable
1072 # from roots.
1073 # from roots.
1073 # Mark this head as having been reached
1074 # Mark this head as having been reached
1074 heads[n] = True
1075 heads[n] = True
1075 elif ancestors is None:
1076 elif ancestors is None:
1076 # Otherwise, we're trying to discover the heads.
1077 # Otherwise, we're trying to discover the heads.
1077 # Assume this is a head because if it isn't, the next step
1078 # Assume this is a head because if it isn't, the next step
1078 # will eventually remove it.
1079 # will eventually remove it.
1079 heads[n] = True
1080 heads[n] = True
1080 # But, obviously its parents aren't.
1081 # But, obviously its parents aren't.
1081 for p in self.parents(n):
1082 for p in self.parents(n):
1082 heads.pop(p, None)
1083 heads.pop(p, None)
1083 heads = [head for head, flag in heads.iteritems() if flag]
1084 heads = [head for head, flag in heads.iteritems() if flag]
1084 roots = list(roots)
1085 roots = list(roots)
1085 assert orderedout
1086 assert orderedout
1086 assert roots
1087 assert roots
1087 assert heads
1088 assert heads
1088 return (orderedout, roots, heads)
1089 return (orderedout, roots, heads)
1089
1090
1090 def headrevs(self, revs=None):
1091 def headrevs(self, revs=None):
1091 if revs is None:
1092 if revs is None:
1092 try:
1093 try:
1093 return self.index.headrevs()
1094 return self.index.headrevs()
1094 except AttributeError:
1095 except AttributeError:
1095 return self._headrevs()
1096 return self._headrevs()
1096 if rustdagop is not None:
1097 if rustdagop is not None:
1097 return rustdagop.headrevs(self.index, revs)
1098 return rustdagop.headrevs(self.index, revs)
1098 return dagop.headrevs(revs, self._uncheckedparentrevs)
1099 return dagop.headrevs(revs, self._uncheckedparentrevs)
1099
1100
1100 def computephases(self, roots):
1101 def computephases(self, roots):
1101 return self.index.computephasesmapsets(roots)
1102 return self.index.computephasesmapsets(roots)
1102
1103
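    # Pure-Python fallback for index.headrevs(): a rev is a head iff no
    # other rev names it as a parent. E.g. in a linear history 0-1-2, revs
    # 0 and 1 are knocked out when their children are visited, leaving 2.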
    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
                                    stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
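        # Fast paths first: nullrev precedes everything, every rev is its
        # own ancestor, and an ancestor always has a smaller rev number, so
        # a > b can never be an ancestor relationship.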
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::<roots> and <roots>::<heads>))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(minroot, heads, roots,
                                              includepath)
        except AttributeError:
            return dagop._reachablerootspure(self.parentrevs,
                                             minroot, roots, heads, includepath)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _('no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
1315 def isvalid(prefix):
1316 def isvalid(prefix):
1316 try:
1317 try:
1317 matchednode = self._partialmatch(prefix)
1318 matchednode = self._partialmatch(prefix)
1318 except error.AmbiguousPrefixLookupError:
1319 except error.AmbiguousPrefixLookupError:
1319 return False
1320 return False
1320 except error.WdirUnsupported:
1321 except error.WdirUnsupported:
1321 # single 'ff...' match
1322 # single 'ff...' match
1322 return True
1323 return True
1323 if matchednode is None:
1324 if matchednode is None:
1324 raise error.LookupError(node, self.indexfile, _('no node'))
1325 raise error.LookupError(node, self.indexfile, _('no node'))
1325 return True
1326 return True
1326
1327
1327 def maybewdir(prefix):
1328 def maybewdir(prefix):
1328 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1329 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1329
1330
1330 hexnode = hex(node)
1331 hexnode = hex(node)
1331
1332
1332 def disambiguate(hexnode, minlength):
1333 def disambiguate(hexnode, minlength):
1333 """Disambiguate against wdirid."""
1334 """Disambiguate against wdirid."""
1334 for length in range(minlength, 41):
1335 for length in range(minlength, 41):
1335 prefix = hexnode[:length]
1336 prefix = hexnode[:length]
1336 if not maybewdir(prefix):
1337 if not maybewdir(prefix):
1337 return prefix
1338 return prefix
1338
1339
1339 if not getattr(self, 'filteredrevs', None):
1340 if not getattr(self, 'filteredrevs', None):
1340 try:
1341 try:
1341 length = max(self.index.shortest(node), minlength)
1342 length = max(self.index.shortest(node), minlength)
1342 return disambiguate(hexnode, length)
1343 return disambiguate(hexnode, length)
1343 except error.RevlogError:
1344 except error.RevlogError:
1344 if node != wdirid:
1345 if node != wdirid:
1345 raise error.LookupError(node, self.indexfile, _('no node'))
1346 raise error.LookupError(node, self.indexfile, _('no node'))
1346 except AttributeError:
1347 except AttributeError:
1347 # Fall through to pure code
1348 # Fall through to pure code
1348 pass
1349 pass
1349
1350
1350 if node == wdirid:
1351 if node == wdirid:
1351 for length in range(minlength, 41):
1352 for length in range(minlength, 41):
1352 prefix = hexnode[:length]
1353 prefix = hexnode[:length]
1353 if isvalid(prefix):
1354 if isvalid(prefix):
1354 return prefix
1355 return prefix
1355
1356
1356 for length in range(minlength, 41):
1357 for length in range(minlength, 41):
1357 prefix = hexnode[:length]
1358 prefix = hexnode[:length]
1358 if isvalid(prefix):
1359 if isvalid(prefix):
1359 return disambiguate(hexnode, length)
1360 return disambiguate(hexnode, length)
1360
1361
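    # Editorial sketch: a simple reference implementation of the same
    # contract over a *sorted* list of hex node ids, returning the shortest
    # prefix of ``hexnode`` shared with no other entry. Only the two
    # lexicographic neighbours can tie for the longest common prefix, so
    # they are all we need to check. Hypothetical helper; assumes
    # ``hexnode`` itself is present in the list:
    @staticmethod
    def _demoshortest(hexnode, sortedhexnodes, minlength=1):
        import bisect
        i = bisect.bisect_left(sortedhexnodes, hexnode)
        neighbours = (sortedhexnodes[i - 1:i] +
                      sortedhexnodes[i + 1:i + 2])
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if not any(n.startswith(prefix) for n in neighbours):
                return prefix
        return hexnode
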
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _('partial read of revlog %s; expected %d bytes from '
                      'offset %d, got %d') %
                    (self.indexfile if self._inline else self.datafile,
                     length, realoffset, len(d) - startoffset))

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _('partial read of revlog %s; expected %d bytes from offset '
                  '%d, got %d') %
                (self.indexfile if self._inline else self.datafile,
                 length, offset, len(d)))

        return d

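    # Editorial sketch: the masking above relies on ``self._chunkcachesize``
    # being a power of two, so ``offset & ~(cachesize - 1)`` rounds the start
    # down to a window boundary and the second mask rounds the end up,
    # over-reading a little to cache forward. ``_demoalignwindow`` is a
    # hypothetical helper, shown only to make the arithmetic concrete:
    @staticmethod
    def _demoalignwindow(offset, length, cachesize=16):
        """Return the widened (realoffset, reallength) read window."""
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        return realoffset, reallength

    # For example, _demoalignwindow(37, 10) == (32, 16): the request for
    # bytes [37, 47) is widened to the aligned window [32, 48).
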
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

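    # Editorial sketch: each entry in ``self.index`` is an 8-tuple, and the
    # first field packs the data offset together with the 16 flag bits,
    # which is why the code above recovers the physical offset with
    # ``>> 16``. The layout, as used throughout this file:
    #
    #   e[0]  offset_type: (data offset << 16) | flags
    #   e[1]  length of the stored (compressed) chunk
    #   e[2]  length of the uncompressed revision text
    #   e[3]  delta base revision
    #   e[4]  linkrev
    #   e[5]  first parent revision
    #   e[6]  second parent revision
    #   e[7]  binary node id
    #
    # ``_demounpackoffset`` is a hypothetical helper for illustration only:
    @staticmethod
    def _demounpackoffset(offset_type):
        """Split a packed e[0] field into (offset, flags)."""
        return int(offset_type >> 16), int(offset_type & 0xFFFF)
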
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(self, revs,
                                                targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

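    # Editorial sketch: without general delta, a stored delta is always
    # against the revision physically preceding it, so the recorded base
    # only matters when it equals the revision itself (a full snapshot);
    # with general delta the recorded base is used directly. A toy
    # restatement of the rule above (hypothetical, over a bare index list):
    @staticmethod
    def _demodeltaparent(index, rev, generaldelta):
        base = index[rev][3]
        if base == rev:
            return -1                # nullrev: stored as a full text
        return base if generaldelta else rev - 1
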
    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

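    # Editorial sketch: in a sparse revlog a revision is a snapshot when its
    # delta base is not one of its parents, recursively down the chain of
    # bases. A toy version of the pure-Python branch above, assuming
    # ``entries`` maps rev -> (base, p1, p2); hypothetical, for illustration:
    @staticmethod
    def _demoissnapshot(entries, rev):
        if rev == -1:
            return True              # nullrev counts as a snapshot
        base, p1, p2 = entries[rev]
        if base == rev or base == -1:
            return True              # full text, or delta against null
        if base in (p1, p2):
            return False             # plain delta against a parent
        return revlog._demoissnapshot(entries, base)
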
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError('revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1),
                              self.rawdata(rev2))

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = ('_processflags(...) is deprecated, use the specialized variant')
        util.nouideprecwarn(msg, '5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == 'read':
            return flagutil.processflagsread(self, text, flags)
        else: # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if raw:
            msg = ('revlog.revision(..., raw=True) is deprecated, '
                   'use revlog.rawdata(...)')
            util.nouideprecwarn(msg, '5.2', stacklevel=2)
        return self._revisiondata(nodeorrev, _df, raw=raw)[0]

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        return self._revisiondata(nodeorrev, _df)[1]

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == nullid:
            return "", {}

        # The text as stored inside the revlog. Might be the revision or might
        # need to be processed to retrieve the revision.
        rawtext = None

        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext, {}
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (they usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext, {}

        sidedata = {}
        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash, sidedata = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text, sidedata

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext # let us have a chance to free memory early
        return (rev, rawtext, False)

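    # Editorial sketch: ``mdiff.patches(basetext, bins)`` above folds each
    # binary delta of the chain over the base text. A toy equivalent using
    # plain (start, end, replacement) edits instead of Mercurial's binary
    # patch encoding (hypothetical, for illustration):
    @staticmethod
    def _demoapplychain(basetext, deltas):
        text = basetext
        for edits in deltas:                 # one delta per revision
            out, pos = [], 0
            for start, end, repl in edits:   # ascending, non-overlapping
                out.append(text[pos:start])  # keep unchanged prefix
                out.append(repl)             # splice in the replacement
                pos = end
            out.append(text[pos:])           # keep unchanged suffix
            text = b''.join(out)
        return text
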
    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data for a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)[0]

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

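    # Editorial sketch: ``storageutil.hashrevisionsha1`` hashes the two
    # parent nodes in sorted order followed by the revision text, which is
    # what makes a node id depend on both content and ancestry. A minimal
    # re-derivation (hypothetical helper; assumes 20-byte binary parents):
    @staticmethod
    def _demohashrevision(text, p1, p2):
        import hashlib
        s = hashlib.sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.digest()
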
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(_("integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode)))
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(_("%s not found in the transaction")
                                    % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp('r') as ifh, self._datafp('w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None,
                    sidedata=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        sidedata - an optional map of extra data that is related to the
            revision but not part of its hash (requires sidedata support)
        """
        if link == nullrev:
            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
                                    % self.indexfile)

        if sidedata is None:
            sidedata = {}
        elif not self.hassidedata:
            raise error.ProgrammingError(
                _("trying to add sidedata to a revlog that does not "
                  "support it")
            )

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags,
                                                           sidedata=sidedata)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)

    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None, deltacomputer=None):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp("a+")
        ifh = self._indexfp("a+")
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh,
                                     deltacomputer=deltacomputer)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data.

        Returns a 2-tuple of (header, data): ``header`` is empty when the
        compressor embeds its own header or the data already starts with a
        NUL byte, and ``'u'`` when the data is stored uncompressed.
        """
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(self._compengineopts)
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

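    # Editorial sketch: the one-byte header convention shared by compress()
    # and decompress() above. ``_demochunkkind`` is a hypothetical
    # classifier, shown only to summarize the dispatch:
    @staticmethod
    def _demochunkkind(chunk):
        t = chunk[0:1]
        if t == b'\0':
            return 'raw (stored as-is, happens to start with NUL)'
        if t == b'u':
            return 'uncompressed (marker byte is stripped on read)'
        if t == b'x':
            return 'zlib-compressed'
        return 'other engine, resolved via compengines header %r' % t
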
    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False,
                     deltacomputer=None):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(_("%s: attempt to add null revision") %
                                    self.indexfile)
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(_("%s: attempt to add wdir revision") %
                                    self.indexfile)

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
             deltainfo.base, link, p1r, p2r, node)
        self.index.append(e)
        self.nodemap[node] = curr

        # Reset the pure node cache start lookup offset to account for new
        # revision.
        if self._nodepos is not None:
            self._nodepos = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
                         link, offset)

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

2080 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2085 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2081 """
2086 """
2082 add a delta group
2087 add a delta group
2083
2088
2084 given a set of deltas, add them to the revision log. the
2089 given a set of deltas, add them to the revision log. the
2085 first delta is against its parent, which should be in our
2090 first delta is against its parent, which should be in our
2086 log, the rest are against the previous delta.
2091 log, the rest are against the previous delta.
2087
2092
2088 If ``addrevisioncb`` is defined, it will be called with arguments of
2093 If ``addrevisioncb`` is defined, it will be called with arguments of
2089 this revlog and the node that was added.
2094 this revlog and the node that was added.
2090 """
2095 """
2091
2096
2092 if self._writinghandles:
2097 if self._writinghandles:
2093 raise error.ProgrammingError('cannot nest addgroup() calls')
2098 raise error.ProgrammingError('cannot nest addgroup() calls')
2094
2099
2095 nodes = []
2100 nodes = []
2096
2101
2097 r = len(self)
2102 r = len(self)
2098 end = 0
2103 end = 0
2099 if r:
2104 if r:
2100 end = self.end(r - 1)
2105 end = self.end(r - 1)
2101 ifh = self._indexfp("a+")
2106 ifh = self._indexfp("a+")
2102 isize = r * self._io.size
2107 isize = r * self._io.size
2103 if self._inline:
2108 if self._inline:
2104 transaction.add(self.indexfile, end + isize, r)
2109 transaction.add(self.indexfile, end + isize, r)
2105 dfh = None
2110 dfh = None
2106 else:
2111 else:
2107 transaction.add(self.indexfile, isize, r)
2112 transaction.add(self.indexfile, isize, r)
2108 transaction.add(self.datafile, end)
2113 transaction.add(self.datafile, end)
2109 dfh = self._datafp("a+")
2114 dfh = self._datafp("a+")
2110 def flush():
2115 def flush():
2111 if dfh:
2116 if dfh:
2112 dfh.flush()
2117 dfh.flush()
2113 ifh.flush()
2118 ifh.flush()
2114
2119
2115 self._writinghandles = (ifh, dfh)
2120 self._writinghandles = (ifh, dfh)
2116
2121
2117 try:
2122 try:
2118 deltacomputer = deltautil.deltacomputer(self)
2123 deltacomputer = deltautil.deltacomputer(self)
2119 # loop through our set of deltas
2124 # loop through our set of deltas
2120 for data in deltas:
2125 for data in deltas:
2121 node, p1, p2, linknode, deltabase, delta, flags = data
2126 node, p1, p2, linknode, deltabase, delta, flags = data
2122 link = linkmapper(linknode)
2127 link = linkmapper(linknode)
2123 flags = flags or REVIDX_DEFAULT_FLAGS
2128 flags = flags or REVIDX_DEFAULT_FLAGS
2124
2129
2125 nodes.append(node)
                nodes.append(node)

                if node in self.nodemap:
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise error.LookupError(p, self.indexfile,
                                                _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise error.LookupError(deltabase, self.indexfile,
                                            _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

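    # A minimal sketch of the full-replacement check performed above
    # (illustrative, not part of the revlog API): a binary delta is a
    # sequence of patch operations, each a big-endian (start, end, newlen)
    # ">lll" header followed by `newlen` bytes of new data. A delta whose
    # base is censored is only acceptable if it is a single operation
    # replacing the whole old text, i.e. if it starts with the header that
    # mdiff.replacediffheader(oldlen, newlen) would produce:
    #
    #   import struct
    #   header = struct.pack(">lll", 0, oldlen, newlen)
    #   is_full_replacement = delta[:struct.calcsize(">lll")] == header
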
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

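    # Illustrative sketch of the flag test in iscensored() above; the exact
    # bit value of REVIDX_ISCENSORED lives in the revlog constants and is
    # assumed here, as is the revlog instance `rl`:
    #
    #   REVIDX_ISCENSORED = 1 << 15           # assumed value
    #   flags = rl.flags(rev)                 # hypothetical revlog `rl`
    #   censored = bool(flags & REVIDX_ISCENSORED)
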
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            self.headrevs(),
                                            self.linkrev, self.parentrevs)

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in pycompat.xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None

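    # Usage sketch for the two methods above (hypothetical revlog `rl`,
    # transaction `tr` and changelog revision `minlink`):
    #
    #   rev, broken = rl.getstrippoint(minlink)
    #   # `rev` is where truncation would happen; `broken` holds revs whose
    #   # linkrevs the strip invalidates and that the caller must save and
    #   # re-add afterwards.
    #   rl.strip(minlink, tr)
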
    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

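    # Usage sketch (hypothetical revlog `rl`): non-zero values indicate
    # trailing garbage, e.g. from an interrupted write:
    #
    #   dd, di = rl.checksize()
    #   if dd or di:
    #       print("data file off by %d bytes, index by %d" % (dd, di))
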
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        if nodesorder is None and not self._generaldelta:
            nodesorder = 'storage'

        if (not self._storedeltachains and
                deltamode != repository.CG_DELTAMODE_PREV):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self, nodes, nodesorder, revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions)

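    # Usage sketch (hypothetical revlog `rl` and node list `nodes`): emit
    # revisions in storage order with full revision data attached, without
    # assuming the receiver already has the parent revisions:
    #
    #   for rd in rl.emitrevisions(nodes, nodesorder='storage',
    #                              revisiondata=True):
    #       consume(rd)  # each `rd` is a revlogrevisiondelta
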
    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta
        encoding differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if
        the delta could choose a better revision, it will do so. This means
        if you are converting a non-generaldelta revlog to a generaldelta
        revlog, deltas will be recomputed if the delta's parent isn't a
        parent of the revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether deltas are forcibly computed against both
        parents for merges. When unset, the destination revlog's current
        setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(tr, destrevlog, addrevisioncb, deltareuse,
                        forcedeltabothparents)

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

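    # Usage sketch (hypothetical source/destination revlogs and transaction):
    # force every delta to be recomputed while copying, e.g. after a delta
    # algorithm change:
    #
    #   srcrevlog.clone(tr, destrevlog, deltareuse=srcrevlog.DELTAREUSENEVER)
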
    def _clone(self, tr, destrevlog, addrevisioncb, deltareuse,
               forcedeltabothparents):
        """perform the core duty of `revlog.clone` after parameter processing"""
        deltacomputer = deltautil.deltacomputer(destrevlog)
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xffff
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self.revision(rev)
                destrevlog.addrevision(text, tr, linkrev, p1, p2,
                                       cachedelta=cachedelta,
                                       node=node, flags=flags,
                                       deltacomputer=deltacomputer)
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.rawdata(rev)

                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                            p2, flags, cachedelta, ifh, dfh,
                                            deltacomputer=deltacomputer)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

            if addrevisioncb:
                addrevisioncb(self, rev, node)

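    # Index entry layout assumed by the tuple indexing in _clone() above
    # (see the index parsers for the authoritative definition):
    #
    #   entry = (offset_and_flags,   # entry[0]; flags are the low 16 bits
    #            compressed_length,
    #            rawsize,
    #            deltabase_rev,
    #            linkrev,            # entry[4]
    #            p1_rev,             # entry[5]
    #            p2_rev,             # entry[6]
    #            node)               # entry[7]
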
    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(_('cannot censor with version %d revlogs') %
                                    self.version)

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile,
                       censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
                                     p1, p2, censornode, REVIDX_ISCENSORED)

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(_('censored revision stored as delta; '
                                        'cannot censor'),
                                      hint=_('censoring of revlogs is not '
                                             'fully implemented; please report '
                                             'this bug'))
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(_('cannot censor due to censored '
                                        'revision having delta stored'))
                rawtext = self._chunk(rev)
            else:
                rawtext = self.rawdata(rev)

            newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
                                 self.flags(rev))

        tr.addbackup(self.indexfile, location='store')
        if not self._inline:
            tr.addbackup(self.datafile, location='store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

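    # Usage sketch (hypothetical revlog `rl`, transaction `tr` and offending
    # node): replace the revision's content with a tombstone while keeping
    # the node graph intact:
    #
    #   rl.censorrevision(tr, badnode, tombstone=b'removed by administrator')
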
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_('data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_('index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state['expectedversion']:
            yield revlogproblem(
                warning=_("warning: '%s' uses revlog format %d; expected %d") %
                        (self.indexfile, version, state['expectedversion']))

        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get('skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                if skipflags:
                    state['skipread'].add(node)
                else:
                    # Side-effect: read content and verify hash.
                    self.revision(node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_('unpacked size is %d, %d expected') % (l2, l1),
                        node=node)

            except error.CensoredNodeError:
                if state['erroroncensored']:
                    yield revlogproblem(error=_('censored file data'),
                                        node=node)
                state['skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_('unpacking %s: %s') % (short(node),
                                                   stringutil.forcebytestr(e)),
                    node=node)
                state['skipread'].add(node)

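    # Usage sketch (hypothetical revlog `rl` and verifier `state` dict):
    # revlogproblem instances carry at most one of `error`/`warning` plus an
    # optional `node`:
    #
    #   for problem in rl.verifyintegrity(state):
    #       if problem.error:
    #           report_error(problem.error, problem.node)
    #       elif problem.warning:
    #           report_warning(problem.warning)
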
    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d['exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d['storedsize'] = sum(self.opener.stat(path).st_size
                                  for path in self.files())

        return d
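
    # Usage sketch (hypothetical revlog `rl`): request only the cheap
    # counters; keys are present only for the fields that were requested:
    #
    #   info = rl.storageinfo(revisionscount=True, trackedsize=True)
    #   print(info['revisionscount'], info['trackedsize'])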