sparse-read: skip gaps too small to be worth splitting...
Paul Morelle
r34882:8c9b08a0 default
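
This commit renames the experimental.sparse-read.min-block-size knob to experimental.sparse-read.min-gap-size (see the configitems.py hunk below): the threshold bounds how large a gap between two wanted revlog segments must be before splitting the read around it pays off, not how large the resulting blocks are. A minimal sketch of that idea follows, under assumed names (slice_reads, segments, mingapsize are illustrative; the actual logic lives in Mercurial's revlog layer):

```python
# Illustrative sketch only; slice_reads/segments/mingapsize are
# hypothetical names, not Mercurial's actual revlog API.
def slice_reads(segments, mingapsize=256 * 1024):
    """Group a non-empty, sorted list of (start, end) byte segments
    into read chunks.

    A new chunk starts only when the gap to the previous segment is at
    least ``mingapsize`` bytes: smaller gaps are cheaper to read through
    than to split on, which is what
    experimental.sparse-read.min-gap-size=256K expresses."""
    chunks = [[segments[0]]]
    for start, end in segments[1:]:
        gap = start - chunks[-1][-1][1]
        if gap >= mingapsize:
            # gap is large enough that a separate read is worthwhile
            chunks.append([(start, end)])
        else:
            # gap too small to be worth splitting; read through it
            chunks[-1].append((start, end))
    return chunks

# With the 256K default, the 50-byte gap is read through while the
# large one splits the reads:
# slice_reads([(0, 100), (150, 300), (1 << 20, 2 << 20)])
# -> [[(0, 100), (150, 300)], [(1048576, 2097152)]]
```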
--- a/mercurial/configitems.py
+++ b/mercurial/configitems.py
@@ -1,1124 +1,1124 @@
 # configitems.py - centralized declaration of configuration option
 #
 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import functools
 import re

 from . import (
     encoding,
     error,
 )

 def loadconfigtable(ui, extname, configtable):
     """update config items known to the ui with the extension ones"""
     for section, items in configtable.items():
         knownitems = ui._knownconfig.setdefault(section, itemregister())
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
             msg = "extension '%s' overwrite config item '%s.%s'"
             msg %= (extname, section, key)
             ui.develwarn(msg, config='warn-config')

         knownitems.update(items)

 class configitem(object):
     """represent a known config item

     :section: the official config section where to find this item,
     :name: the official name within the section,
     :default: default value for this item,
     :alias: optional list of tuples as alternatives,
     :generic: this is a generic definition; the name is matched using a
               regular expression.
     """

     def __init__(self, section, name, default=None, alias=(),
                  generic=False, priority=0):
         self.section = section
         self.name = name
         self.default = default
         self.alias = list(alias)
         self.generic = generic
         self.priority = priority
         self._re = None
         if generic:
             self._re = re.compile(self.name)

 class itemregister(dict):
     """A specialized dictionary that can handle wild-card selection"""

     def __init__(self):
         super(itemregister, self).__init__()
         self._generics = set()

     def update(self, other):
         super(itemregister, self).update(other)
         self._generics.update(other._generics)

     def __setitem__(self, key, item):
         super(itemregister, self).__setitem__(key, item)
         if item.generic:
             self._generics.add(item)

     def get(self, key):
         baseitem = super(itemregister, self).get(key)
         if baseitem is not None and not baseitem.generic:
             return baseitem

         # search for a matching generic item
         generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
         for item in generics:
             # we use 'match' instead of 'search' to make the matching simpler
             # for people unfamiliar with regular expressions. Having the match
             # rooted at the start of the string produces less surprising
             # results for users writing simple regexes for sub-attributes.
             #
             # For example, using "color\..*" with match produces an
             # unsurprising result, while using search could suddenly match
             # apparently unrelated configuration that happens to contain
             # "color." anywhere. This is a tradeoff where we favor requiring
             # ".*" on some patterns to avoid the need to prefix most patterns
             # with "^". The "^" seems more error prone.
             if item._re.match(key):
                 return item

         return None

 coreitems = {}

 def _register(configtable, *args, **kwargs):
     item = configitem(*args, **kwargs)
     section = configtable.setdefault(item.section, itemregister())
     if item.name in section:
         msg = "duplicated config item registration for '%s.%s'"
         raise error.ProgrammingError(msg % (item.section, item.name))
     section[item.name] = item

 # special value for cases where the default is derived from other values
 dynamicdefault = object()

 # Registering actual config items

 def getitemregister(configtable):
     return functools.partial(_register, configtable)

 coreconfigitem = getitemregister(coreitems)

 coreconfigitem('alias', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('annotate', 'nodates',
     default=False,
 )
 coreconfigitem('annotate', 'showfunc',
     default=False,
 )
 coreconfigitem('annotate', 'unified',
     default=None,
 )
 coreconfigitem('annotate', 'git',
     default=False,
 )
 coreconfigitem('annotate', 'ignorews',
     default=False,
 )
 coreconfigitem('annotate', 'ignorewsamount',
     default=False,
 )
 coreconfigitem('annotate', 'ignoreblanklines',
     default=False,
 )
 coreconfigitem('annotate', 'ignorewseol',
     default=False,
 )
 coreconfigitem('annotate', 'nobinary',
     default=False,
 )
 coreconfigitem('annotate', 'noprefix',
     default=False,
 )
 coreconfigitem('auth', 'cookiefile',
     default=None,
 )
 # bookmarks.pushing: internal hack for discovery
 coreconfigitem('bookmarks', 'pushing',
     default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
 coreconfigitem('bundle', 'mainreporoot',
     default='',
 )
 # bundle.reorder: experimental config
 coreconfigitem('bundle', 'reorder',
     default='auto',
 )
 coreconfigitem('censor', 'policy',
     default='abort',
 )
 coreconfigitem('chgserver', 'idletimeout',
     default=3600,
 )
 coreconfigitem('chgserver', 'skiphash',
     default=False,
 )
 coreconfigitem('cmdserver', 'log',
     default=None,
 )
 coreconfigitem('color', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('color', 'mode',
     default='auto',
 )
 coreconfigitem('color', 'pagermode',
     default=dynamicdefault,
 )
 coreconfigitem('commands', 'status.relative',
     default=False,
 )
 coreconfigitem('commands', 'status.skipstates',
     default=[],
 )
 coreconfigitem('commands', 'status.verbose',
     default=False,
 )
 coreconfigitem('commands', 'update.check',
     default=None,
     # Deprecated, remove after 4.4 release
     alias=[('experimental', 'updatecheck')]
 )
 coreconfigitem('commands', 'update.requiredest',
     default=False,
 )
 coreconfigitem('committemplate', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('debug', 'dirstate.delaywrite',
     default=0,
 )
 coreconfigitem('defaults', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('devel', 'all-warnings',
     default=False,
 )
 coreconfigitem('devel', 'bundle2.debug',
     default=False,
 )
 coreconfigitem('devel', 'cache-vfs',
     default=None,
 )
 coreconfigitem('devel', 'check-locks',
     default=False,
 )
 coreconfigitem('devel', 'check-relroot',
     default=False,
 )
 coreconfigitem('devel', 'default-date',
     default=None,
 )
 coreconfigitem('devel', 'deprec-warn',
     default=False,
 )
 coreconfigitem('devel', 'disableloaddefaultcerts',
     default=False,
 )
 coreconfigitem('devel', 'warn-empty-changegroup',
     default=False,
 )
 coreconfigitem('devel', 'legacy.exchange',
     default=list,
 )
 coreconfigitem('devel', 'servercafile',
     default='',
 )
 coreconfigitem('devel', 'serverexactprotocol',
     default='',
 )
 coreconfigitem('devel', 'serverrequirecert',
     default=False,
 )
 coreconfigitem('devel', 'strip-obsmarkers',
     default=True,
 )
 coreconfigitem('devel', 'warn-config',
     default=None,
 )
 coreconfigitem('devel', 'warn-config-default',
     default=None,
 )
 coreconfigitem('devel', 'user.obsmarker',
     default=None,
 )
 coreconfigitem('devel', 'warn-config-unknown',
     default=None,
 )
 coreconfigitem('diff', 'nodates',
     default=False,
 )
 coreconfigitem('diff', 'showfunc',
     default=False,
 )
 coreconfigitem('diff', 'unified',
     default=None,
 )
 coreconfigitem('diff', 'git',
     default=False,
 )
 coreconfigitem('diff', 'ignorews',
     default=False,
 )
 coreconfigitem('diff', 'ignorewsamount',
     default=False,
 )
 coreconfigitem('diff', 'ignoreblanklines',
     default=False,
 )
 coreconfigitem('diff', 'ignorewseol',
     default=False,
 )
 coreconfigitem('diff', 'nobinary',
     default=False,
 )
 coreconfigitem('diff', 'noprefix',
     default=False,
 )
 coreconfigitem('email', 'bcc',
     default=None,
 )
 coreconfigitem('email', 'cc',
     default=None,
 )
 coreconfigitem('email', 'charsets',
     default=list,
 )
 coreconfigitem('email', 'from',
     default=None,
 )
 coreconfigitem('email', 'method',
     default='smtp',
 )
 coreconfigitem('email', 'reply-to',
     default=None,
 )
 coreconfigitem('experimental', 'archivemetatemplate',
     default=dynamicdefault,
 )
 coreconfigitem('experimental', 'bundle-phases',
     default=False,
 )
 coreconfigitem('experimental', 'bundle2-advertise',
     default=True,
 )
 coreconfigitem('experimental', 'bundle2-output-capture',
     default=False,
 )
 coreconfigitem('experimental', 'bundle2.pushback',
     default=False,
 )
 coreconfigitem('experimental', 'bundle2lazylocking',
     default=False,
 )
 coreconfigitem('experimental', 'bundlecomplevel',
     default=None,
 )
 coreconfigitem('experimental', 'changegroup3',
     default=False,
 )
 coreconfigitem('experimental', 'clientcompressionengines',
     default=list,
 )
 coreconfigitem('experimental', 'copytrace',
     default='on',
 )
 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
     default=100,
 )
 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
     default=100,
 )
 coreconfigitem('experimental', 'crecordtest',
     default=None,
 )
 coreconfigitem('experimental', 'editortmpinhg',
     default=False,
 )
 coreconfigitem('experimental', 'evolution',
     default=list,
 )
 coreconfigitem('experimental', 'evolution.allowdivergence',
     default=False,
     alias=[('experimental', 'allowdivergence')]
 )
 coreconfigitem('experimental', 'evolution.allowunstable',
     default=None,
 )
 coreconfigitem('experimental', 'evolution.createmarkers',
     default=None,
 )
 coreconfigitem('experimental', 'evolution.exchange',
     default=None,
 )
 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
     default=False,
 )
 coreconfigitem('experimental', 'evolution.track-operation',
     default=True,
 )
 coreconfigitem('experimental', 'maxdeltachainspan',
     default=-1,
 )
 coreconfigitem('experimental', 'mmapindexthreshold',
     default=None,
 )
 coreconfigitem('experimental', 'nonnormalparanoidcheck',
     default=False,
 )
 coreconfigitem('experimental', 'effect-flags',
     default=False,
 )
 coreconfigitem('experimental', 'exportableenviron',
     default=list,
 )
 coreconfigitem('experimental', 'extendedheader.index',
     default=None,
 )
 coreconfigitem('experimental', 'extendedheader.similarity',
     default=False,
 )
 coreconfigitem('experimental', 'format.compression',
     default='zlib',
 )
 coreconfigitem('experimental', 'graphshorten',
     default=False,
 )
 coreconfigitem('experimental', 'graphstyle.parent',
     default=dynamicdefault,
 )
 coreconfigitem('experimental', 'graphstyle.missing',
     default=dynamicdefault,
 )
 coreconfigitem('experimental', 'graphstyle.grandparent',
     default=dynamicdefault,
 )
 coreconfigitem('experimental', 'hook-track-tags',
     default=False,
 )
 coreconfigitem('experimental', 'httppostargs',
     default=False,
 )
 coreconfigitem('experimental', 'manifestv2',
     default=False,
 )
 coreconfigitem('experimental', 'mergedriver',
     default=None,
 )
 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
     default=False,
 )
 coreconfigitem('experimental', 'rebase.multidest',
     default=False,
 )
 coreconfigitem('experimental', 'revertalternateinteractivemode',
     default=True,
 )
 coreconfigitem('experimental', 'revlogv2',
     default=None,
 )
 coreconfigitem('experimental', 'spacemovesdown',
     default=False,
 )
 coreconfigitem('experimental', 'sparse-read',
     default=False,
 )
 coreconfigitem('experimental', 'sparse-read.density-threshold',
     default=0.25,
 )
-coreconfigitem('experimental', 'sparse-read.min-block-size',
+coreconfigitem('experimental', 'sparse-read.min-gap-size',
     default='256K',
 )
 coreconfigitem('experimental', 'treemanifest',
     default=False,
 )
 coreconfigitem('extensions', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('extdata', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('format', 'aggressivemergedeltas',
     default=False,
 )
 coreconfigitem('format', 'chunkcachesize',
     default=None,
 )
 coreconfigitem('format', 'dotencode',
     default=True,
 )
 coreconfigitem('format', 'generaldelta',
     default=False,
 )
 coreconfigitem('format', 'manifestcachesize',
     default=None,
 )
 coreconfigitem('format', 'maxchainlen',
     default=None,
 )
 coreconfigitem('format', 'obsstore-version',
     default=None,
 )
 coreconfigitem('format', 'usefncache',
     default=True,
 )
 coreconfigitem('format', 'usegeneraldelta',
     default=True,
 )
 coreconfigitem('format', 'usestore',
     default=True,
 )
 coreconfigitem('hooks', '.*',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem('hgweb-paths', '.*',
     default=list,
     generic=True,
 )
 coreconfigitem('hostfingerprints', '.*',
     default=list,
     generic=True,
 )
 coreconfigitem('hostsecurity', 'ciphers',
     default=None,
 )
 coreconfigitem('hostsecurity', 'disabletls10warning',
     default=False,
 )
 coreconfigitem('hostsecurity', 'minimumprotocol',
     default=dynamicdefault,
 )
 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem('hostsecurity', '.*:ciphers$',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem('hostsecurity', '.*:fingerprints$',
     default=list,
     generic=True,
 )
 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
     default=None,
     generic=True,
 )

 coreconfigitem('http_proxy', 'always',
     default=False,
 )
 coreconfigitem('http_proxy', 'host',
     default=None,
 )
 coreconfigitem('http_proxy', 'no',
     default=list,
 )
 coreconfigitem('http_proxy', 'passwd',
     default=None,
 )
 coreconfigitem('http_proxy', 'user',
     default=None,
 )
 coreconfigitem('logtoprocess', 'commandexception',
     default=None,
 )
 coreconfigitem('logtoprocess', 'commandfinish',
     default=None,
 )
 coreconfigitem('logtoprocess', 'command',
     default=None,
 )
 coreconfigitem('logtoprocess', 'develwarn',
     default=None,
 )
 coreconfigitem('logtoprocess', 'uiblocked',
     default=None,
 )
 coreconfigitem('merge', 'checkunknown',
     default='abort',
 )
 coreconfigitem('merge', 'checkignored',
     default='abort',
 )
 coreconfigitem('merge', 'followcopies',
     default=True,
 )
 coreconfigitem('merge', 'on-failure',
     default='continue',
 )
 coreconfigitem('merge', 'preferancestor',
     default=lambda: ['*'],
 )
 coreconfigitem('merge-tools', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('merge-tools', r'.*\.args$',
     default="$local $base $other",
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.binary$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.check$',
     default=list,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.checkchanged$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.executable$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.fixeol$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.gui$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.priority$',
     default=0,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.premerge$',
     default=dynamicdefault,
     generic=True,
     priority=-1,
 )
 coreconfigitem('merge-tools', r'.*\.symlink$',
     default=False,
     generic=True,
     priority=-1,
 )
 coreconfigitem('pager', 'attend-.*',
     default=dynamicdefault,
     generic=True,
 )
 coreconfigitem('pager', 'ignore',
     default=list,
 )
 coreconfigitem('pager', 'pager',
     default=dynamicdefault,
 )
 coreconfigitem('patch', 'eol',
     default='strict',
 )
 coreconfigitem('patch', 'fuzz',
     default=2,
 )
 coreconfigitem('paths', 'default',
     default=None,
 )
 coreconfigitem('paths', 'default-push',
     default=None,
 )
 coreconfigitem('paths', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('phases', 'checksubrepos',
     default='follow',
 )
 coreconfigitem('phases', 'new-commit',
     default='draft',
 )
 coreconfigitem('phases', 'publish',
     default=True,
 )
 coreconfigitem('profiling', 'enabled',
     default=False,
 )
 coreconfigitem('profiling', 'format',
     default='text',
 )
 coreconfigitem('profiling', 'freq',
     default=1000,
 )
 coreconfigitem('profiling', 'limit',
     default=30,
 )
 coreconfigitem('profiling', 'nested',
     default=0,
 )
 coreconfigitem('profiling', 'output',
     default=None,
 )
 coreconfigitem('profiling', 'showmax',
     default=0.999,
 )
 coreconfigitem('profiling', 'showmin',
     default=dynamicdefault,
 )
 coreconfigitem('profiling', 'sort',
     default='inlinetime',
 )
 coreconfigitem('profiling', 'statformat',
     default='hotpath',
 )
 coreconfigitem('profiling', 'type',
     default='stat',
 )
 coreconfigitem('progress', 'assume-tty',
     default=False,
 )
 coreconfigitem('progress', 'changedelay',
     default=1,
 )
 coreconfigitem('progress', 'clear-complete',
     default=True,
 )
 coreconfigitem('progress', 'debug',
     default=False,
 )
 coreconfigitem('progress', 'delay',
     default=3,
 )
 coreconfigitem('progress', 'disable',
     default=False,
 )
 coreconfigitem('progress', 'estimateinterval',
     default=60.0,
 )
 coreconfigitem('progress', 'format',
     default=lambda: ['topic', 'bar', 'number', 'estimate'],
 )
 coreconfigitem('progress', 'refresh',
     default=0.1,
 )
 coreconfigitem('progress', 'width',
     default=dynamicdefault,
 )
 coreconfigitem('push', 'pushvars.server',
     default=False,
 )
 coreconfigitem('server', 'bundle1',
     default=True,
 )
 coreconfigitem('server', 'bundle1gd',
     default=None,
 )
 coreconfigitem('server', 'bundle1.pull',
     default=None,
 )
 coreconfigitem('server', 'bundle1gd.pull',
     default=None,
 )
 coreconfigitem('server', 'bundle1.push',
     default=None,
 )
 coreconfigitem('server', 'bundle1gd.push',
     default=None,
 )
 coreconfigitem('server', 'compressionengines',
     default=list,
 )
 coreconfigitem('server', 'concurrent-push-mode',
     default='strict',
 )
 coreconfigitem('server', 'disablefullbundle',
     default=False,
 )
 coreconfigitem('server', 'maxhttpheaderlen',
     default=1024,
 )
 coreconfigitem('server', 'preferuncompressed',
     default=False,
 )
 coreconfigitem('server', 'uncompressed',
     default=True,
 )
 coreconfigitem('server', 'uncompressedallowsecret',
     default=False,
 )
 coreconfigitem('server', 'validate',
     default=False,
 )
 coreconfigitem('server', 'zliblevel',
     default=-1,
 )
 coreconfigitem('smtp', 'host',
     default=None,
 )
 coreconfigitem('smtp', 'local_hostname',
     default=None,
 )
 coreconfigitem('smtp', 'password',
     default=None,
 )
 coreconfigitem('smtp', 'port',
     default=dynamicdefault,
 )
 coreconfigitem('smtp', 'tls',
     default='none',
 )
 coreconfigitem('smtp', 'username',
     default=None,
 )
 coreconfigitem('sparse', 'missingwarning',
     default=True,
 )
 coreconfigitem('templates', '.*',
     default=None,
     generic=True,
 )
 coreconfigitem('trusted', 'groups',
     default=list,
 )
 coreconfigitem('trusted', 'users',
     default=list,
 )
 coreconfigitem('ui', '_usedassubrepo',
     default=False,
 )
 coreconfigitem('ui', 'allowemptycommit',
     default=False,
 )
 coreconfigitem('ui', 'archivemeta',
     default=True,
 )
 coreconfigitem('ui', 'askusername',
     default=False,
 )
 coreconfigitem('ui', 'clonebundlefallback',
     default=False,
 )
 coreconfigitem('ui', 'clonebundleprefers',
     default=list,
 )
 coreconfigitem('ui', 'clonebundles',
     default=True,
 )
 coreconfigitem('ui', 'color',
     default='auto',
 )
 coreconfigitem('ui', 'commitsubrepos',
     default=False,
 )
 coreconfigitem('ui', 'debug',
     default=False,
 )
 coreconfigitem('ui', 'debugger',
     default=None,
 )
 coreconfigitem('ui', 'fallbackencoding',
     default=None,
 )
 coreconfigitem('ui', 'forcecwd',
     default=None,
 )
 coreconfigitem('ui', 'forcemerge',
     default=None,
 )
 coreconfigitem('ui', 'formatdebug',
     default=False,
 )
 coreconfigitem('ui', 'formatjson',
     default=False,
 )
 coreconfigitem('ui', 'formatted',
     default=None,
 )
 coreconfigitem('ui', 'graphnodetemplate',
     default=None,
 )
 coreconfigitem('ui', 'http2debuglevel',
     default=None,
 )
 coreconfigitem('ui', 'interactive',
     default=None,
 )
 coreconfigitem('ui', 'interface',
     default=None,
 )
 coreconfigitem('ui', 'interface.chunkselector',
     default=None,
 )
 coreconfigitem('ui', 'logblockedtimes',
     default=False,
 )
 coreconfigitem('ui', 'logtemplate',
     default=None,
 )
 coreconfigitem('ui', 'merge',
     default=None,
 )
 coreconfigitem('ui', 'mergemarkers',
     default='basic',
 )
 coreconfigitem('ui', 'mergemarkertemplate',
     default=('{node|short} '
              '{ifeq(tags, "tip", "", '
              'ifeq(tags, "", "", "{tags} "))}'
              '{if(bookmarks, "{bookmarks} ")}'
              '{ifeq(branch, "default", "", "{branch} ")}'
              '- {author|user}: {desc|firstline}')
 )
 coreconfigitem('ui', 'nontty',
     default=False,
 )
 coreconfigitem('ui', 'origbackuppath',
     default=None,
 )
 coreconfigitem('ui', 'paginate',
     default=True,
 )
 coreconfigitem('ui', 'patch',
     default=None,
 )
 coreconfigitem('ui', 'portablefilenames',
     default='warn',
 )
 coreconfigitem('ui', 'promptecho',
     default=False,
 )
 coreconfigitem('ui', 'quiet',
     default=False,
 )
 coreconfigitem('ui', 'quietbookmarkmove',
     default=False,
 )
 coreconfigitem('ui', 'remotecmd',
     default='hg',
 )
 coreconfigitem('ui', 'report_untrusted',
     default=True,
 )
 coreconfigitem('ui', 'rollback',
     default=True,
 )
 coreconfigitem('ui', 'slash',
     default=False,
 )
 coreconfigitem('ui', 'ssh',
     default='ssh',
 )
 coreconfigitem('ui', 'statuscopies',
     default=False,
 )
 coreconfigitem('ui', 'strict',
     default=False,
 )
 coreconfigitem('ui', 'style',
     default='',
 )
 coreconfigitem('ui', 'supportcontact',
     default=None,
 )
 coreconfigitem('ui', 'textwidth',
     default=78,
 )
 coreconfigitem('ui', 'timeout',
     default='600',
 )
 coreconfigitem('ui', 'traceback',
     default=False,
 )
 coreconfigitem('ui', 'tweakdefaults',
     default=False,
 )
 coreconfigitem('ui', 'usehttp2',
     default=False,
 )
 coreconfigitem('ui', 'username',
     alias=[('ui', 'user')]
 )
 coreconfigitem('ui', 'verbose',
     default=False,
 )
 coreconfigitem('verify', 'skipflags',
     default=None,
 )
 coreconfigitem('web', 'allowbz2',
     default=False,
 )
 coreconfigitem('web', 'allowgz',
     default=False,
 )
 coreconfigitem('web', 'allowpull',
     default=True,
 )
 coreconfigitem('web', 'allow_push',
     default=list,
 )
 coreconfigitem('web', 'allowzip',
     default=False,
 )
 coreconfigitem('web', 'archivesubrepos',
     default=False,
 )
 coreconfigitem('web', 'cache',
     default=True,
 )
 coreconfigitem('web', 'contact',
     default=None,
 )
 coreconfigitem('web', 'deny_push',
     default=list,
 )
 coreconfigitem('web', 'guessmime',
     default=False,
 )
 coreconfigitem('web', 'hidden',
     default=False,
 )
 coreconfigitem('web', 'labels',
     default=list,
 )
 coreconfigitem('web', 'logoimg',
     default='hglogo.png',
 )
 coreconfigitem('web', 'logourl',
     default='https://mercurial-scm.org/',
 )
 coreconfigitem('web', 'accesslog',
     default='-',
 )
 coreconfigitem('web', 'address',
     default='',
 )
 coreconfigitem('web', 'allow_archive',
     default=list,
 )
 coreconfigitem('web', 'allow_read',
     default=list,
 )
 coreconfigitem('web', 'baseurl',
     default=None,
 )
 coreconfigitem('web', 'cacerts',
     default=None,
 )
 coreconfigitem('web', 'certificate',
     default=None,
 )
 coreconfigitem('web', 'collapse',
     default=False,
 )
 coreconfigitem('web', 'csp',
     default=None,
 )
 coreconfigitem('web', 'deny_read',
     default=list,
 )
 coreconfigitem('web', 'descend',
     default=True,
 )
 coreconfigitem('web', 'description',
     default="",
 )
 coreconfigitem('web', 'encoding',
     default=lambda: encoding.encoding,
 )
 coreconfigitem('web', 'errorlog',
     default='-',
 )
 coreconfigitem('web', 'ipv6',
     default=False,
 )
 coreconfigitem('web', 'maxchanges',
     default=10,
 )
 coreconfigitem('web', 'maxfiles',
     default=10,
 )
 coreconfigitem('web', 'maxshortchanges',
     default=60,
 )
 coreconfigitem('web', 'motd',
     default='',
 )
 coreconfigitem('web', 'name',
     default=dynamicdefault,
 )
 coreconfigitem('web', 'port',
     default=8000,
 )
 coreconfigitem('web', 'prefix',
     default='',
 )
 coreconfigitem('web', 'push_ssl',
     default=True,
 )
 coreconfigitem('web', 'refreshinterval',
     default=20,
 )
 coreconfigitem('web', 'staticurl',
     default=None,
 )
 coreconfigitem('web', 'stripes',
     default=1,
 )
 coreconfigitem('web', 'style',
     default='paper',
 )
 coreconfigitem('web', 'templates',
     default=None,
 )
 coreconfigitem('web', 'view',
     default='served',
 )
 coreconfigitem('worker', 'backgroundclose',
     default=dynamicdefault,
 )
 # Windows defaults to a limit of 512 open files. A buffer of 128
 # should give us enough headway.
 coreconfigitem('worker', 'backgroundclosemaxqueue',
     default=384,
 )
 coreconfigitem('worker', 'backgroundcloseminfilecount',
     default=2048,
 )
 coreconfigitem('worker', 'backgroundclosethreadcount',
     default=4,
 )
 coreconfigitem('worker', 'numcpus',
     default=None,
 )

 # Rebase related configuration moved to core because other extensions are
 # doing strange things. For example, shelve imports the extension to reuse
 # some bits without formally loading it.
 coreconfigitem('commands', 'rebase.requiredest',
     default=False,
 )
 coreconfigitem('experimental', 'rebaseskipobsolete',
     default=True,
 )
 coreconfigitem('rebase', 'singletransaction',
     default=False,
 )
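
Before the next file, a short usage sketch of the registration machinery declared above; it assumes the configitem and itemregister classes from configitems.py are in scope, and the keys are illustrative only, not additional Mercurial API:

```python
# A per-section register, as loadconfigtable()/_register() build them.
reg = itemregister()
reg['mode'] = configitem('color', 'mode', default='auto')
reg['.*'] = configitem('color', '.*', default=None, generic=True)

item = reg.get('mode')
assert item.default == 'auto' and not item.generic  # exact entries win

item = reg.get('pagermode')
assert item.generic  # no exact entry, so the '.*' pattern is consulted

# Generic items are tried in ascending (priority, name) order, which is
# why the specific merge-tools patterns above use priority=-1: they sort
# before the '.*' catch-all and therefore match first.
```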
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2346 +1,2346 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property cached

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
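
# Usage sketch (hypothetical caller, not part of this changeset): peek at a
# filecache-ed property without forcing it to be computed, e.g. to reuse an
# already-loaded changelog while avoiding an expensive read.
def _example_peekchangelog(repo):
    cl, cached = isfilecached(repo, 'changelog')
    if not cached:
        return None  # not loaded yet; do not trigger the load
    return len(cl)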

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

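# Illustrative sketch (hypothetical function, not part of this changeset):
# decorating with unfilteredmethod makes the body always see every revision,
# even when it is invoked on a filtered repoview.
@unfilteredmethod
def _example_fulllen(repo):
    return len(repo)  # counts hidden changesets too
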
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

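# Usage sketch (hypothetical helper, not part of this changeset): a localpeer
# exposes the wire-protocol surface over an in-process repository, so callers
# can stay protocol-agnostic.
def _example_peercaps(repo):
    return sorted(localpeer(repo).capabilities())
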
class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
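
# Usage sketch (standard mercurial.hg API, not defined in this module): a
# localrepository is normally obtained through hg.repository() rather than by
# instantiating the class below directly. The helper name is hypothetical.
def _example_openrepo(uiobj, path):
    from mercurial import hg  # local import to keep the sketch self-contained
    return hg.repository(uiobj, path)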

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # a list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business, someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs; at the time this comment
        # was written, they were only used by basectx.match.
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed roots.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
-       srminblocksize = self.ui.configbytes('experimental',
-                                            'sparse-read.min-block-size')
+       srmingapsize = self.ui.configbytes('experimental',
+                                          'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
-       self.svfs.options['sparse-read-min-block-size'] = srminblocksize
+       self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

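    # Illustrative sketch (not part of this changeset): the renamed knob is a
    # byte quantity, so suffixed values parse through configbytes. The method
    # name and the chosen value below are hypothetical.
    def _example_sparsereadsetup(self):
        self.ui.setconfig('experimental', 'sparse-read', 'yes')
        self.ui.setconfig('experimental', 'sparse-read.min-gap-size', '256KB')
        self._applyopenerreqs()  # re-derive self.svfs.options from the config
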
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)

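    # Illustrative sketch (hypothetical method): counting changesets on the
    # 'visible' view versus the unfiltered repo shows the effect of filtering.
    def _example_hiddencount(self):
        visible = self.filtered('visible')
        return len(self.unfiltered()) - len(visible)  # number of hidden revs
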
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on the changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

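    # Usage sketch (hypothetical method): revs() takes %-formatting, so
    # values are escaped safely instead of being spliced into the expression.
    def _example_draftcount(self):
        return len(self.revs('draft() and branch(%s)', 'default'))
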
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

878 def _findtags(self):
878 def _findtags(self):
879 '''Do the hard work of finding tags. Return a pair of dicts
879 '''Do the hard work of finding tags. Return a pair of dicts
880 (tags, tagtypes) where tags maps tag name to node, and tagtypes
880 (tags, tagtypes) where tags maps tag name to node, and tagtypes
881 maps tag name to a string like \'global\' or \'local\'.
881 maps tag name to a string like \'global\' or \'local\'.
882 Subclasses or extensions are free to add their own tags, but
882 Subclasses or extensions are free to add their own tags, but
883 should be aware that the returned dicts will be retained for the
883 should be aware that the returned dicts will be retained for the
884 duration of the localrepo object.'''
884 duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
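        # A sketch of typical use (illustration only; 'repo' is assumed to
        # be a localrepository):
        #   for branch, heads in repo.branchmap().iteritems():
        #       latest = heads[-1]  # the highest-revision head, per ordering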
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
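        # Membership test for changelog nodes, reported as a list of
        # booleans; filtered (hidden) revisions count as unknown.
        # Sketch (node1/node2 are hypothetical):
        #   repo.known([node1, node2])  # -> e.g. [True, False]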
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
                if p2 == nullid:
                    for f, s in sorted(self.dirstate.copies().items()):
                        if f not in pctx and s not in pctx:
                            self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

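    # The [encode] and [decode] config sections drive _loadfilter below. A
    # sketch of such a configuration (adapted from the hgrc documentation;
    # shown for illustration only):
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = gzip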
    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
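        #
        # A minimal sketch of an in-process 'txnclose' hook consuming that
        # file (illustration only; the hook name 'showtagmoves' and its body
        # are hypothetical, not part of Mercurial):
        #
        #   def showtagmoves(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != '1':
        #           return
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip('\n').split(' ', 2)
        #               ui.write('tag %s: %s %s\n' % (action, name, node))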
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing
                # the transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent the dirstateguard from overwriting the already
            # restored dirstate
            dsguard.close()
1446
1446
1447 self.dirstate.restorebackup(None, 'undo.dirstate')
1447 self.dirstate.restorebackup(None, 'undo.dirstate')
1448 try:
1448 try:
1449 branch = self.vfs.read('undo.branch')
1449 branch = self.vfs.read('undo.branch')
1450 self.dirstate.setbranch(encoding.tolocal(branch))
1450 self.dirstate.setbranch(encoding.tolocal(branch))
1451 except IOError:
1451 except IOError:
1452 ui.warn(_('named branch could not be reset: '
1452 ui.warn(_('named branch could not be reset: '
1453 'current branch is still \'%s\'\n')
1453 'current branch is still \'%s\'\n')
1454 % self.dirstate.branch())
1454 % self.dirstate.branch())
1455
1455
1456 parents = tuple([p.rev() for p in self[None].parents()])
1456 parents = tuple([p.rev() for p in self[None].parents()])
1457 if len(parents) > 1:
1457 if len(parents) > 1:
1458 ui.status(_('working directory now based on '
1458 ui.status(_('working directory now based on '
1459 'revisions %d and %d\n') % parents)
1459 'revisions %d and %d\n') % parents)
1460 else:
1460 else:
1461 ui.status(_('working directory now based on '
1461 ui.status(_('working directory now based on '
1462 'revision %d\n') % parents)
1462 'revision %d\n') % parents)
1463 mergemod.mergestate.clean(self, self['.'].node())
1463 mergemod.mergestate.clean(self, self['.'].node())
1464
1464
1465 # TODO: if we know which new heads may result from this rollback, pass
1465 # TODO: if we know which new heads may result from this rollback, pass
1466 # them to destroy(), which will prevent the branchhead cache from being
1466 # them to destroy(), which will prevent the branchhead cache from being
1467 # invalidated.
1467 # invalidated.
1468 self.destroyed()
1468 self.destroyed()
1469 return 0
1469 return 0
1470
1470
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
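        # A sketch of the conventional call pattern (illustration only;
        # assumes the locks and the transaction are used as context managers,
        # as elsewhere in Mercurial):
        #   with repo.wlock(), repo.lock():
        #       with repo.transaction('example') as tr:
        #           ...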
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
1719 # Mark the new revision of this file as a copy of another
1719 # Mark the new revision of this file as a copy of another
1720 # file. This copy data will effectively act as a parent
1720 # file. This copy data will effectively act as a parent
1721 # of this new revision. If this is a merge, the first
1721 # of this new revision. If this is a merge, the first
1722 # parent will be the nullid (meaning "look up the copy data")
1722 # parent will be the nullid (meaning "look up the copy data")
1723 # and the second one will be the other parent. For example:
1723 # and the second one will be the other parent. For example:
1724 #
1724 #
1725 # 0 --- 1 --- 3 rev1 changes file foo
1725 # 0 --- 1 --- 3 rev1 changes file foo
1726 # \ / rev2 renames foo to bar and changes it
1726 # \ / rev2 renames foo to bar and changes it
1727 # \- 2 -/ rev3 should have bar with all changes and
1727 # \- 2 -/ rev3 should have bar with all changes and
1728 # should record that bar descends from
1728 # should record that bar descends from
1729 # bar in rev2 and foo in rev1
1729 # bar in rev2 and foo in rev1
1730 #
1730 #
1731 # this allows this merge to succeed:
1731 # this allows this merge to succeed:
1732 #
1732 #
1733 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1733 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1734 # \ / merging rev3 and rev4 should use bar@rev2
1734 # \ / merging rev3 and rev4 should use bar@rev2
1735 # \- 2 --- 4 as the merge base
1735 # \- 2 --- 4 as the merge base
1736 #
1736 #
1737
1737
1738 cfname = copy[0]
1738 cfname = copy[0]
1739 crev = manifest1.get(cfname)
1739 crev = manifest1.get(cfname)
1740 newfparent = fparent2
1740 newfparent = fparent2
1741
1741
1742 if manifest2: # branch merge
1742 if manifest2: # branch merge
1743 if fparent2 == nullid or crev is None: # copied on remote side
1743 if fparent2 == nullid or crev is None: # copied on remote side
1744 if cfname in manifest2:
1744 if cfname in manifest2:
1745 crev = manifest2[cfname]
1745 crev = manifest2[cfname]
1746 newfparent = fparent1
1746 newfparent = fparent1
1747
1747
1748 # Here, we used to search backwards through history to try to find
1748 # Here, we used to search backwards through history to try to find
1749 # where the file copy came from if the source of a copy was not in
1749 # where the file copy came from if the source of a copy was not in
1750 # the parent directory. However, this doesn't actually make sense to
1750 # the parent directory. However, this doesn't actually make sense to
1751 # do (what does a copy from something not in your working copy even
1751 # do (what does a copy from something not in your working copy even
1752 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1752 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1753 # the user that copy information was dropped, so if they didn't
1753 # the user that copy information was dropped, so if they didn't
1754 # expect this outcome it can be fixed, but this is the correct
1754 # expect this outcome it can be fixed, but this is the correct
1755 # behavior in this circumstance.
1755 # behavior in this circumstance.
1756
1756
1757 if crev:
1757 if crev:
1758 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1758 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1759 meta["copy"] = cfname
1759 meta["copy"] = cfname
1760 meta["copyrev"] = hex(crev)
1760 meta["copyrev"] = hex(crev)
1761 fparent1, fparent2 = nullid, newfparent
1761 fparent1, fparent2 = nullid, newfparent
1762 else:
1762 else:
1763 self.ui.warn(_("warning: can't find ancestor for '%s' "
1763 self.ui.warn(_("warning: can't find ancestor for '%s' "
1764 "copied from '%s'!\n") % (fname, cfname))
1764 "copied from '%s'!\n") % (fname, cfname))
1765
1765
1766 elif fparent1 == nullid:
1766 elif fparent1 == nullid:
1767 fparent1, fparent2 = fparent2, nullid
1767 fparent1, fparent2 = fparent2, nullid
1768 elif fparent2 != nullid:
1768 elif fparent2 != nullid:
1769 # is one parent an ancestor of the other?
1769 # is one parent an ancestor of the other?
1770 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1770 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1771 if fparent1 in fparentancestors:
1771 if fparent1 in fparentancestors:
1772 fparent1, fparent2 = fparent2, nullid
1772 fparent1, fparent2 = fparent2, nullid
1773 elif fparent2 in fparentancestors:
1773 elif fparent2 in fparentancestors:
1774 fparent2 = nullid
1774 fparent2 = nullid
1775
1775
1776 # is the file changed?
1776 # is the file changed?
1777 text = fctx.data()
1777 text = fctx.data()
1778 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1778 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1779 changelist.append(fname)
1779 changelist.append(fname)
1780 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1780 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1781 # are just the flags changed during merge?
1781 # are just the flags changed during merge?
1782 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1782 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1783 changelist.append(fname)
1783 changelist.append(fname)
1784
1784
1785 return fparent1
1785 return fparent1
1786
1786
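# Illustration (sketch with assumed values): for the rename case above,
# the new filelog revision of 'bar' records its origin through copy
# metadata rather than through its first parent:
#
#     meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: source filenode
#     fparent1, fparent2 = nullid, newfparent
#
# Readers that see the nullid first parent know to look up meta['copy'].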
1787 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1787 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1788 """check for commit arguments that aren't committable"""
1788 """check for commit arguments that aren't committable"""
1789 if match.isexact() or match.prefix():
1789 if match.isexact() or match.prefix():
1790 matched = set(status.modified + status.added + status.removed)
1790 matched = set(status.modified + status.added + status.removed)
1791
1791
1792 for f in match.files():
1792 for f in match.files():
1793 f = self.dirstate.normalize(f)
1793 f = self.dirstate.normalize(f)
1794 if f == '.' or f in matched or f in wctx.substate:
1794 if f == '.' or f in matched or f in wctx.substate:
1795 continue
1795 continue
1796 if f in status.deleted:
1796 if f in status.deleted:
1797 fail(f, _('file not found!'))
1797 fail(f, _('file not found!'))
1798 if f in vdirs: # visited directory
1798 if f in vdirs: # visited directory
1799 d = f + '/'
1799 d = f + '/'
1800 for mf in matched:
1800 for mf in matched:
1801 if mf.startswith(d):
1801 if mf.startswith(d):
1802 break
1802 break
1803 else:
1803 else:
1804 fail(f, _("no match under directory!"))
1804 fail(f, _("no match under directory!"))
1805 elif f not in self.dirstate:
1805 elif f not in self.dirstate:
1806 fail(f, _("file not tracked!"))
1806 fail(f, _("file not tracked!"))
1807
1807
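# Example (hypothetical file names): 'hg commit missing.txt' reaches
# fail() above with "file not found!" if missing.txt is tracked but
# deleted from disk, and with "file not tracked!" if it was never added.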
1808 @unfilteredmethod
1808 @unfilteredmethod
1809 def commit(self, text="", user=None, date=None, match=None, force=False,
1809 def commit(self, text="", user=None, date=None, match=None, force=False,
1810 editor=False, extra=None):
1810 editor=False, extra=None):
1811 """Add a new revision to current repository.
1811 """Add a new revision to current repository.
1812
1812
1813 Revision information is gathered from the working directory;
1813 Revision information is gathered from the working directory;
1814 match can be used to filter the committed files. If editor is
1814 match can be used to filter the committed files. If editor is
1815 supplied, it is called to get a commit message.
1815 supplied, it is called to get a commit message.
1816 """
1816 """
1817 if extra is None:
1817 if extra is None:
1818 extra = {}
1818 extra = {}
1819
1819
1820 def fail(f, msg):
1820 def fail(f, msg):
1821 raise error.Abort('%s: %s' % (f, msg))
1821 raise error.Abort('%s: %s' % (f, msg))
1822
1822
1823 if not match:
1823 if not match:
1824 match = matchmod.always(self.root, '')
1824 match = matchmod.always(self.root, '')
1825
1825
1826 if not force:
1826 if not force:
1827 vdirs = []
1827 vdirs = []
1828 match.explicitdir = vdirs.append
1828 match.explicitdir = vdirs.append
1829 match.bad = fail
1829 match.bad = fail
1830
1830
1831 wlock = lock = tr = None
1831 wlock = lock = tr = None
1832 try:
1832 try:
1833 wlock = self.wlock()
1833 wlock = self.wlock()
1834 lock = self.lock() # for recent changelog (see issue4368)
1834 lock = self.lock() # for recent changelog (see issue4368)
1835
1835
1836 wctx = self[None]
1836 wctx = self[None]
1837 merge = len(wctx.parents()) > 1
1837 merge = len(wctx.parents()) > 1
1838
1838
1839 if not force and merge and not match.always():
1839 if not force and merge and not match.always():
1840 raise error.Abort(_('cannot partially commit a merge '
1840 raise error.Abort(_('cannot partially commit a merge '
1841 '(do not specify files or patterns)'))
1841 '(do not specify files or patterns)'))
1842
1842
1843 status = self.status(match=match, clean=force)
1843 status = self.status(match=match, clean=force)
1844 if force:
1844 if force:
1845 status.modified.extend(status.clean) # mq may commit clean files
1845 status.modified.extend(status.clean) # mq may commit clean files
1846
1846
1847 # check subrepos
1847 # check subrepos
1848 subs = []
1848 subs = []
1849 commitsubs = set()
1849 commitsubs = set()
1850 newstate = wctx.substate.copy()
1850 newstate = wctx.substate.copy()
1851 # only manage subrepos and .hgsubstate if .hgsub is present
1851 # only manage subrepos and .hgsubstate if .hgsub is present
1852 if '.hgsub' in wctx:
1852 if '.hgsub' in wctx:
1853 # we'll decide whether to track this ourselves, thanks
1853 # we'll decide whether to track this ourselves, thanks
1854 for c in status.modified, status.added, status.removed:
1854 for c in status.modified, status.added, status.removed:
1855 if '.hgsubstate' in c:
1855 if '.hgsubstate' in c:
1856 c.remove('.hgsubstate')
1856 c.remove('.hgsubstate')
1857
1857
1858 # compare current state to last committed state
1858 # compare current state to last committed state
1859 # build new substate based on last committed state
1859 # build new substate based on last committed state
1860 oldstate = wctx.p1().substate
1860 oldstate = wctx.p1().substate
1861 for s in sorted(newstate.keys()):
1861 for s in sorted(newstate.keys()):
1862 if not match(s):
1862 if not match(s):
1863 # ignore working copy, use old state if present
1863 # ignore working copy, use old state if present
1864 if s in oldstate:
1864 if s in oldstate:
1865 newstate[s] = oldstate[s]
1865 newstate[s] = oldstate[s]
1866 continue
1866 continue
1867 if not force:
1867 if not force:
1868 raise error.Abort(
1868 raise error.Abort(
1869 _("commit with new subrepo %s excluded") % s)
1869 _("commit with new subrepo %s excluded") % s)
1870 dirtyreason = wctx.sub(s).dirtyreason(True)
1870 dirtyreason = wctx.sub(s).dirtyreason(True)
1871 if dirtyreason:
1871 if dirtyreason:
1872 if not self.ui.configbool('ui', 'commitsubrepos'):
1872 if not self.ui.configbool('ui', 'commitsubrepos'):
1873 raise error.Abort(dirtyreason,
1873 raise error.Abort(dirtyreason,
1874 hint=_("use --subrepos for recursive commit"))
1874 hint=_("use --subrepos for recursive commit"))
1875 subs.append(s)
1875 subs.append(s)
1876 commitsubs.add(s)
1876 commitsubs.add(s)
1877 else:
1877 else:
1878 bs = wctx.sub(s).basestate()
1878 bs = wctx.sub(s).basestate()
1879 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1879 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1880 if oldstate.get(s, (None, None, None))[1] != bs:
1880 if oldstate.get(s, (None, None, None))[1] != bs:
1881 subs.append(s)
1881 subs.append(s)
1882
1882
1883 # check for removed subrepos
1883 # check for removed subrepos
1884 for p in wctx.parents():
1884 for p in wctx.parents():
1885 r = [s for s in p.substate if s not in newstate]
1885 r = [s for s in p.substate if s not in newstate]
1886 subs += [s for s in r if match(s)]
1886 subs += [s for s in r if match(s)]
1887 if subs:
1887 if subs:
1888 if (not match('.hgsub') and
1888 if (not match('.hgsub') and
1889 '.hgsub' in (wctx.modified() + wctx.added())):
1889 '.hgsub' in (wctx.modified() + wctx.added())):
1890 raise error.Abort(
1890 raise error.Abort(
1891 _("can't commit subrepos without .hgsub"))
1891 _("can't commit subrepos without .hgsub"))
1892 status.modified.insert(0, '.hgsubstate')
1892 status.modified.insert(0, '.hgsubstate')
1893
1893
1894 elif '.hgsub' in status.removed:
1894 elif '.hgsub' in status.removed:
1895 # clean up .hgsubstate when .hgsub is removed
1895 # clean up .hgsubstate when .hgsub is removed
1896 if ('.hgsubstate' in wctx and
1896 if ('.hgsubstate' in wctx and
1897 '.hgsubstate' not in (status.modified + status.added +
1897 '.hgsubstate' not in (status.modified + status.added +
1898 status.removed)):
1898 status.removed)):
1899 status.removed.insert(0, '.hgsubstate')
1899 status.removed.insert(0, '.hgsubstate')
1900
1900
1901 # make sure all explicit patterns are matched
1901 # make sure all explicit patterns are matched
1902 if not force:
1902 if not force:
1903 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1903 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1904
1904
1905 cctx = context.workingcommitctx(self, status,
1905 cctx = context.workingcommitctx(self, status,
1906 text, user, date, extra)
1906 text, user, date, extra)
1907
1907
1908 # internal config: ui.allowemptycommit
1908 # internal config: ui.allowemptycommit
1909 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1909 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1910 or extra.get('close') or merge or cctx.files()
1910 or extra.get('close') or merge or cctx.files()
1911 or self.ui.configbool('ui', 'allowemptycommit'))
1911 or self.ui.configbool('ui', 'allowemptycommit'))
1912 if not allowemptycommit:
1912 if not allowemptycommit:
1913 return None
1913 return None
1914
1914
1915 if merge and cctx.deleted():
1915 if merge and cctx.deleted():
1916 raise error.Abort(_("cannot commit merge with missing files"))
1916 raise error.Abort(_("cannot commit merge with missing files"))
1917
1917
1918 ms = mergemod.mergestate.read(self)
1918 ms = mergemod.mergestate.read(self)
1919 mergeutil.checkunresolved(ms)
1919 mergeutil.checkunresolved(ms)
1920
1920
1921 if editor:
1921 if editor:
1922 cctx._text = editor(self, cctx, subs)
1922 cctx._text = editor(self, cctx, subs)
1923 edited = (text != cctx._text)
1923 edited = (text != cctx._text)
1924
1924
1925 # Save commit message in case this transaction gets rolled back
1925 # Save commit message in case this transaction gets rolled back
1926 # (e.g. by a pretxncommit hook). Leave the content alone on
1926 # (e.g. by a pretxncommit hook). Leave the content alone on
1927 # the assumption that the user will use the same editor again.
1927 # the assumption that the user will use the same editor again.
1928 msgfn = self.savecommitmessage(cctx._text)
1928 msgfn = self.savecommitmessage(cctx._text)
1929
1929
1930 # commit subs and write new state
1930 # commit subs and write new state
1931 if subs:
1931 if subs:
1932 for s in sorted(commitsubs):
1932 for s in sorted(commitsubs):
1933 sub = wctx.sub(s)
1933 sub = wctx.sub(s)
1934 self.ui.status(_('committing subrepository %s\n') %
1934 self.ui.status(_('committing subrepository %s\n') %
1935 subrepo.subrelpath(sub))
1935 subrepo.subrelpath(sub))
1936 sr = sub.commit(cctx._text, user, date)
1936 sr = sub.commit(cctx._text, user, date)
1937 newstate[s] = (newstate[s][0], sr)
1937 newstate[s] = (newstate[s][0], sr)
1938 subrepo.writestate(self, newstate)
1938 subrepo.writestate(self, newstate)
1939
1939
1940 p1, p2 = self.dirstate.parents()
1940 p1, p2 = self.dirstate.parents()
1941 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1941 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1942 try:
1942 try:
1943 self.hook("precommit", throw=True, parent1=hookp1,
1943 self.hook("precommit", throw=True, parent1=hookp1,
1944 parent2=hookp2)
1944 parent2=hookp2)
1945 tr = self.transaction('commit')
1945 tr = self.transaction('commit')
1946 ret = self.commitctx(cctx, True)
1946 ret = self.commitctx(cctx, True)
1947 except: # re-raises
1947 except: # re-raises
1948 if edited:
1948 if edited:
1949 self.ui.write(
1949 self.ui.write(
1950 _('note: commit message saved in %s\n') % msgfn)
1950 _('note: commit message saved in %s\n') % msgfn)
1951 raise
1951 raise
1952 # update bookmarks, dirstate and mergestate
1952 # update bookmarks, dirstate and mergestate
1953 bookmarks.update(self, [p1, p2], ret)
1953 bookmarks.update(self, [p1, p2], ret)
1954 cctx.markcommitted(ret)
1954 cctx.markcommitted(ret)
1955 ms.reset()
1955 ms.reset()
1956 tr.close()
1956 tr.close()
1957
1957
1958 finally:
1958 finally:
1959 lockmod.release(tr, lock, wlock)
1959 lockmod.release(tr, lock, wlock)
1960
1960
1961 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1961 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1962 # hack for commands that use a temporary commit (e.g. histedit):
1962 # hack for commands that use a temporary commit (e.g. histedit):
1963 # the temporary commit may already be stripped when the hook runs
1963 # the temporary commit may already be stripped when the hook runs
1964 if self.changelog.hasnode(ret):
1964 if self.changelog.hasnode(ret):
1965 self.hook("commit", node=node, parent1=parent1,
1965 self.hook("commit", node=node, parent1=parent1,
1966 parent2=parent2)
1966 parent2=parent2)
1967 self._afterlock(commithook)
1967 self._afterlock(commithook)
1968 return ret
1968 return ret
1969
1969
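# Example (editor's sketch; file name and user are illustrative):
# committing one file through this API, using the matchmod already
# imported by this module:
#
#     m = matchmod.match(repo.root, repo.getcwd(), ['src/foo.py'])
#     node = repo.commit(text='fix foo', user='alice', match=m)
#     if node is None:
#         repo.ui.status('nothing changed\n')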
1970 @unfilteredmethod
1970 @unfilteredmethod
1971 def commitctx(self, ctx, error=False):
1971 def commitctx(self, ctx, error=False):
1972 """Add a new revision to current repository.
1972 """Add a new revision to current repository.
1973 Revision information is passed via the context argument.
1973 Revision information is passed via the context argument.
1974 """
1974 """
1975
1975
1976 tr = None
1976 tr = None
1977 p1, p2 = ctx.p1(), ctx.p2()
1977 p1, p2 = ctx.p1(), ctx.p2()
1978 user = ctx.user()
1978 user = ctx.user()
1979
1979
1980 lock = self.lock()
1980 lock = self.lock()
1981 try:
1981 try:
1982 tr = self.transaction("commit")
1982 tr = self.transaction("commit")
1983 trp = weakref.proxy(tr)
1983 trp = weakref.proxy(tr)
1984
1984
1985 if ctx.manifestnode():
1985 if ctx.manifestnode():
1986 # reuse an existing manifest revision
1986 # reuse an existing manifest revision
1987 mn = ctx.manifestnode()
1987 mn = ctx.manifestnode()
1988 files = ctx.files()
1988 files = ctx.files()
1989 elif ctx.files():
1989 elif ctx.files():
1990 m1ctx = p1.manifestctx()
1990 m1ctx = p1.manifestctx()
1991 m2ctx = p2.manifestctx()
1991 m2ctx = p2.manifestctx()
1992 mctx = m1ctx.copy()
1992 mctx = m1ctx.copy()
1993
1993
1994 m = mctx.read()
1994 m = mctx.read()
1995 m1 = m1ctx.read()
1995 m1 = m1ctx.read()
1996 m2 = m2ctx.read()
1996 m2 = m2ctx.read()
1997
1997
1998 # check in files
1998 # check in files
1999 added = []
1999 added = []
2000 changed = []
2000 changed = []
2001 removed = list(ctx.removed())
2001 removed = list(ctx.removed())
2002 linkrev = len(self)
2002 linkrev = len(self)
2003 self.ui.note(_("committing files:\n"))
2003 self.ui.note(_("committing files:\n"))
2004 for f in sorted(ctx.modified() + ctx.added()):
2004 for f in sorted(ctx.modified() + ctx.added()):
2005 self.ui.note(f + "\n")
2005 self.ui.note(f + "\n")
2006 try:
2006 try:
2007 fctx = ctx[f]
2007 fctx = ctx[f]
2008 if fctx is None:
2008 if fctx is None:
2009 removed.append(f)
2009 removed.append(f)
2010 else:
2010 else:
2011 added.append(f)
2011 added.append(f)
2012 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2012 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2013 trp, changed)
2013 trp, changed)
2014 m.setflag(f, fctx.flags())
2014 m.setflag(f, fctx.flags())
2015 except OSError as inst:
2015 except OSError as inst:
2016 self.ui.warn(_("trouble committing %s!\n") % f)
2016 self.ui.warn(_("trouble committing %s!\n") % f)
2017 raise
2017 raise
2018 except IOError as inst:
2018 except IOError as inst:
2019 errcode = getattr(inst, 'errno', errno.ENOENT)
2019 errcode = getattr(inst, 'errno', errno.ENOENT)
2020 if error or errcode and errcode != errno.ENOENT:
2020 if error or errcode and errcode != errno.ENOENT:
2021 self.ui.warn(_("trouble committing %s!\n") % f)
2021 self.ui.warn(_("trouble committing %s!\n") % f)
2022 raise
2022 raise
2023
2023
2024 # update manifest
2024 # update manifest
2025 self.ui.note(_("committing manifest\n"))
2025 self.ui.note(_("committing manifest\n"))
2026 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2026 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2027 drop = [f for f in removed if f in m]
2027 drop = [f for f in removed if f in m]
2028 for f in drop:
2028 for f in drop:
2029 del m[f]
2029 del m[f]
2030 mn = mctx.write(trp, linkrev,
2030 mn = mctx.write(trp, linkrev,
2031 p1.manifestnode(), p2.manifestnode(),
2031 p1.manifestnode(), p2.manifestnode(),
2032 added, drop)
2032 added, drop)
2033 files = changed + removed
2033 files = changed + removed
2034 else:
2034 else:
2035 mn = p1.manifestnode()
2035 mn = p1.manifestnode()
2036 files = []
2036 files = []
2037
2037
2038 # update changelog
2038 # update changelog
2039 self.ui.note(_("committing changelog\n"))
2039 self.ui.note(_("committing changelog\n"))
2040 self.changelog.delayupdate(tr)
2040 self.changelog.delayupdate(tr)
2041 n = self.changelog.add(mn, files, ctx.description(),
2041 n = self.changelog.add(mn, files, ctx.description(),
2042 trp, p1.node(), p2.node(),
2042 trp, p1.node(), p2.node(),
2043 user, ctx.date(), ctx.extra().copy())
2043 user, ctx.date(), ctx.extra().copy())
2044 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2044 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2045 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2045 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2046 parent2=xp2)
2046 parent2=xp2)
2047 # set the new commit in its proper phase
2047 # set the new commit in its proper phase
2048 targetphase = subrepo.newcommitphase(self.ui, ctx)
2048 targetphase = subrepo.newcommitphase(self.ui, ctx)
2049 if targetphase:
2049 if targetphase:
2050 # retracting the boundary does not alter the parent changeset:
2050 # retracting the boundary does not alter the parent changeset:
2051 # if a parent has a higher phase, the resulting phase will
2051 # if a parent has a higher phase, the resulting phase will
2052 # be compliant anyway
2052 # be compliant anyway
2053 #
2053 #
2054 # if the minimal phase was 0 we don't need to retract anything
2054 # if the minimal phase was 0 we don't need to retract anything
2055 phases.registernew(self, tr, targetphase, [n])
2055 phases.registernew(self, tr, targetphase, [n])
2056 tr.close()
2056 tr.close()
2057 return n
2057 return n
2058 finally:
2058 finally:
2059 if tr:
2059 if tr:
2060 tr.release()
2060 tr.release()
2061 lock.release()
2061 lock.release()
2062
2062
2063 @unfilteredmethod
2063 @unfilteredmethod
2064 def destroying(self):
2064 def destroying(self):
2065 '''Inform the repository that nodes are about to be destroyed.
2065 '''Inform the repository that nodes are about to be destroyed.
2066 Intended for use by strip and rollback, so there's a common
2066 Intended for use by strip and rollback, so there's a common
2067 place for anything that has to be done before destroying history.
2067 place for anything that has to be done before destroying history.
2068
2068
2069 This is mostly useful for saving state that is in memory and waiting
2069 This is mostly useful for saving state that is in memory and waiting
2070 to be flushed when the current lock is released. Because a call to
2070 to be flushed when the current lock is released. Because a call to
2071 destroyed is imminent, the repo will be invalidated, causing those
2071 destroyed is imminent, the repo will be invalidated, causing those
2072 changes to stay in memory (waiting for the next unlock), or vanish
2072 changes to stay in memory (waiting for the next unlock), or vanish
2073 completely.
2073 completely.
2074 '''
2074 '''
2075 # When using the same lock to commit and strip, the phasecache is left
2075 # When using the same lock to commit and strip, the phasecache is left
2076 # dirty after committing. Then when we strip, the repo is invalidated,
2076 # dirty after committing. Then when we strip, the repo is invalidated,
2077 # causing those changes to disappear.
2077 # causing those changes to disappear.
2078 if '_phasecache' in vars(self):
2078 if '_phasecache' in vars(self):
2079 self._phasecache.write()
2079 self._phasecache.write()
2080
2080
2081 @unfilteredmethod
2081 @unfilteredmethod
2082 def destroyed(self):
2082 def destroyed(self):
2083 '''Inform the repository that nodes have been destroyed.
2083 '''Inform the repository that nodes have been destroyed.
2084 Intended for use by strip and rollback, so there's a common
2084 Intended for use by strip and rollback, so there's a common
2085 place for anything that has to be done after destroying history.
2085 place for anything that has to be done after destroying history.
2086 '''
2086 '''
2087 # When one tries to:
2087 # When one tries to:
2088 # 1) destroy nodes thus calling this method (e.g. strip)
2088 # 1) destroy nodes thus calling this method (e.g. strip)
2089 # 2) use phasecache somewhere (e.g. commit)
2089 # 2) use phasecache somewhere (e.g. commit)
2090 #
2090 #
2091 # then 2) will fail because the phasecache contains nodes that were
2091 # then 2) will fail because the phasecache contains nodes that were
2092 # removed. We can either remove phasecache from the filecache,
2092 # removed. We can either remove phasecache from the filecache,
2093 # causing it to reload next time it is accessed, or simply filter
2093 # causing it to reload next time it is accessed, or simply filter
2094 # the removed nodes now and write the updated cache.
2094 # the removed nodes now and write the updated cache.
2095 self._phasecache.filterunknown(self)
2095 self._phasecache.filterunknown(self)
2096 self._phasecache.write()
2096 self._phasecache.write()
2097
2097
2098 # refresh all repository caches
2098 # refresh all repository caches
2099 self.updatecaches()
2099 self.updatecaches()
2100
2100
2101 # Ensure the persistent tag cache is updated. Doing it now
2101 # Ensure the persistent tag cache is updated. Doing it now
2102 # means that the tag cache only has to worry about destroyed
2102 # means that the tag cache only has to worry about destroyed
2103 # heads immediately after a strip/rollback. That in turn
2103 # heads immediately after a strip/rollback. That in turn
2104 # guarantees that "cachetip == currenttip" (comparing both rev
2104 # guarantees that "cachetip == currenttip" (comparing both rev
2105 # and node) always means no nodes have been added or destroyed.
2105 # and node) always means no nodes have been added or destroyed.
2106
2106
2107 # XXX this is suboptimal when qrefresh'ing: we strip the current
2107 # XXX this is suboptimal when qrefresh'ing: we strip the current
2108 # head, refresh the tag cache, then immediately add a new head.
2108 # head, refresh the tag cache, then immediately add a new head.
2109 # But I think doing it this way is necessary for the "instant
2109 # But I think doing it this way is necessary for the "instant
2110 # tag cache retrieval" case to work.
2110 # tag cache retrieval" case to work.
2111 self.invalidate()
2111 self.invalidate()
2112
2112
2113 def walk(self, match, node=None):
2113 def walk(self, match, node=None):
2114 '''
2114 '''
2115 walk recursively through the directory tree or a given
2115 walk recursively through the directory tree or a given
2116 changeset, finding all files matched by the match
2116 changeset, finding all files matched by the match
2117 function
2117 function
2118 '''
2118 '''
2119 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2119 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2120 return self[node].walk(match)
2120 return self[node].walk(match)
2121
2121
2122 def status(self, node1='.', node2=None, match=None,
2122 def status(self, node1='.', node2=None, match=None,
2123 ignored=False, clean=False, unknown=False,
2123 ignored=False, clean=False, unknown=False,
2124 listsubrepos=False):
2124 listsubrepos=False):
2125 '''a convenience method that calls node1.status(node2)'''
2125 '''a convenience method that calls node1.status(node2)'''
2126 return self[node1].status(node2, match, ignored, clean, unknown,
2126 return self[node1].status(node2, match, ignored, clean, unknown,
2127 listsubrepos)
2127 listsubrepos)
2128
2128
2129 def addpostdsstatus(self, ps):
2129 def addpostdsstatus(self, ps):
2130 """Add a callback to run within the wlock, at the point at which status
2130 """Add a callback to run within the wlock, at the point at which status
2131 fixups happen.
2131 fixups happen.
2132
2132
2133 On status completion, callback(wctx, status) will be called with the
2133 On status completion, callback(wctx, status) will be called with the
2134 wlock held, unless the dirstate has changed from underneath or the wlock
2134 wlock held, unless the dirstate has changed from underneath or the wlock
2135 couldn't be grabbed.
2135 couldn't be grabbed.
2136
2136
2137 Callbacks should not capture and use a cached copy of the dirstate --
2137 Callbacks should not capture and use a cached copy of the dirstate --
2138 it might change in the meanwhile. Instead, they should access the
2138 it might change in the meanwhile. Instead, they should access the
2139 dirstate via wctx.repo().dirstate.
2139 dirstate via wctx.repo().dirstate.
2140
2140
2141 This list is emptied out after each status run -- extensions should
2141 This list is emptied out after each status run -- extensions should
2142 make sure they add to this list each time dirstate.status is called.
2142 make sure they add to this list each time dirstate.status is called.
2143 Extensions should also make sure they don't call this for statuses
2143 Extensions should also make sure they don't call this for statuses
2144 that don't involve the dirstate.
2144 that don't involve the dirstate.
2145 """
2145 """
2146
2146
2147 # The list is located here for uniqueness reasons -- it is actually
2147 # The list is located here for uniqueness reasons -- it is actually
2148 # managed by the workingctx, but that isn't unique per-repo.
2148 # managed by the workingctx, but that isn't unique per-repo.
2149 self._postdsstatus.append(ps)
2149 self._postdsstatus.append(ps)
2150
2150
2151 def postdsstatus(self):
2151 def postdsstatus(self):
2152 """Used by workingctx to get the list of post-dirstate-status hooks."""
2152 """Used by workingctx to get the list of post-dirstate-status hooks."""
2153 return self._postdsstatus
2153 return self._postdsstatus
2154
2154
2155 def clearpostdsstatus(self):
2155 def clearpostdsstatus(self):
2156 """Used by workingctx to clear post-dirstate-status hooks."""
2156 """Used by workingctx to clear post-dirstate-status hooks."""
2157 del self._postdsstatus[:]
2157 del self._postdsstatus[:]
2158
2158
2159 def heads(self, start=None):
2159 def heads(self, start=None):
2160 if start is None:
2160 if start is None:
2161 cl = self.changelog
2161 cl = self.changelog
2162 headrevs = reversed(cl.headrevs())
2162 headrevs = reversed(cl.headrevs())
2163 return [cl.node(rev) for rev in headrevs]
2163 return [cl.node(rev) for rev in headrevs]
2164
2164
2165 heads = self.changelog.heads(start)
2165 heads = self.changelog.heads(start)
2166 # sort the output in rev descending order
2166 # sort the output in rev descending order
2167 return sorted(heads, key=self.changelog.rev, reverse=True)
2167 return sorted(heads, key=self.changelog.rev, reverse=True)
2168
2168
2169 def branchheads(self, branch=None, start=None, closed=False):
2169 def branchheads(self, branch=None, start=None, closed=False):
2170 '''return a (possibly filtered) list of heads for the given branch
2170 '''return a (possibly filtered) list of heads for the given branch
2171
2171
2172 Heads are returned in topological order, from newest to oldest.
2172 Heads are returned in topological order, from newest to oldest.
2173 If branch is None, use the dirstate branch.
2173 If branch is None, use the dirstate branch.
2174 If start is not None, return only heads reachable from start.
2174 If start is not None, return only heads reachable from start.
2175 If closed is True, return heads that are marked as closed as well.
2175 If closed is True, return heads that are marked as closed as well.
2176 '''
2176 '''
2177 if branch is None:
2177 if branch is None:
2178 branch = self[None].branch()
2178 branch = self[None].branch()
2179 branches = self.branchmap()
2179 branches = self.branchmap()
2180 if branch not in branches:
2180 if branch not in branches:
2181 return []
2181 return []
2182 # the cache returns heads ordered lowest to highest
2182 # the cache returns heads ordered lowest to highest
2183 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2183 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2184 if start is not None:
2184 if start is not None:
2185 # filter out the heads that cannot be reached from startrev
2185 # filter out the heads that cannot be reached from startrev
2186 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2186 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2187 bheads = [h for h in bheads if h in fbheads]
2187 bheads = [h for h in bheads if h in fbheads]
2188 return bheads
2188 return bheads
2189
2189
2190 def branches(self, nodes):
2190 def branches(self, nodes):
2191 if not nodes:
2191 if not nodes:
2192 nodes = [self.changelog.tip()]
2192 nodes = [self.changelog.tip()]
2193 b = []
2193 b = []
2194 for n in nodes:
2194 for n in nodes:
2195 t = n
2195 t = n
2196 while True:
2196 while True:
2197 p = self.changelog.parents(n)
2197 p = self.changelog.parents(n)
2198 if p[1] != nullid or p[0] == nullid:
2198 if p[1] != nullid or p[0] == nullid:
2199 b.append((t, n, p[0], p[1]))
2199 b.append((t, n, p[0], p[1]))
2200 break
2200 break
2201 n = p[0]
2201 n = p[0]
2202 return b
2202 return b
2203
2203
2204 def between(self, pairs):
2204 def between(self, pairs):
2205 r = []
2205 r = []
2206
2206
2207 for top, bottom in pairs:
2207 for top, bottom in pairs:
2208 n, l, i = top, [], 0
2208 n, l, i = top, [], 0
2209 f = 1
2209 f = 1
2210
2210
2211 while n != bottom and n != nullid:
2211 while n != bottom and n != nullid:
2212 p = self.changelog.parents(n)[0]
2212 p = self.changelog.parents(n)[0]
2213 if i == f:
2213 if i == f:
2214 l.append(n)
2214 l.append(n)
2215 f = f * 2
2215 f = f * 2
2216 n = p
2216 n = p
2217 i += 1
2217 i += 1
2218
2218
2219 r.append(l)
2219 r.append(l)
2220
2220
2221 return r
2221 return r
2222
2222
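# Illustration (assumed linear history): for a first-parent chain
# bottom <- ... <- top, between([(top, bottom)]) returns the nodes
# 1, 2, 4, 8, ... steps below 'top' -- an exponentially spaced sample
# that the legacy discovery protocol uses to narrow down branch points.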
2223 def checkpush(self, pushop):
2223 def checkpush(self, pushop):
2224 """Extensions can override this function if additional checks have
2224 """Extensions can override this function if additional checks have
2225 to be performed before pushing, or call it if they override the push
2225 to be performed before pushing, or call it if they override the push
2226 command.
2226 command.
2227 """
2227 """
2228
2228
2229 @unfilteredpropertycache
2229 @unfilteredpropertycache
2230 def prepushoutgoinghooks(self):
2230 def prepushoutgoinghooks(self):
2231 """Return util.hooks consists of a pushop with repo, remote, outgoing
2231 """Return util.hooks consists of a pushop with repo, remote, outgoing
2232 methods, which are called before pushing changesets.
2232 methods, which are called before pushing changesets.
2233 """
2233 """
2234 return util.hooks()
2234 return util.hooks()
2235
2235
2236 def pushkey(self, namespace, key, old, new):
2236 def pushkey(self, namespace, key, old, new):
2237 try:
2237 try:
2238 tr = self.currenttransaction()
2238 tr = self.currenttransaction()
2239 hookargs = {}
2239 hookargs = {}
2240 if tr is not None:
2240 if tr is not None:
2241 hookargs.update(tr.hookargs)
2241 hookargs.update(tr.hookargs)
2242 hookargs['namespace'] = namespace
2242 hookargs['namespace'] = namespace
2243 hookargs['key'] = key
2243 hookargs['key'] = key
2244 hookargs['old'] = old
2244 hookargs['old'] = old
2245 hookargs['new'] = new
2245 hookargs['new'] = new
2246 self.hook('prepushkey', throw=True, **hookargs)
2246 self.hook('prepushkey', throw=True, **hookargs)
2247 except error.HookAbort as exc:
2247 except error.HookAbort as exc:
2248 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2248 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2249 if exc.hint:
2249 if exc.hint:
2250 self.ui.write_err(_("(%s)\n") % exc.hint)
2250 self.ui.write_err(_("(%s)\n") % exc.hint)
2251 return False
2251 return False
2252 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2252 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2253 ret = pushkey.push(self, namespace, key, old, new)
2253 ret = pushkey.push(self, namespace, key, old, new)
2254 def runhook():
2254 def runhook():
2255 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2255 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2256 ret=ret)
2256 ret=ret)
2257 self._afterlock(runhook)
2257 self._afterlock(runhook)
2258 return ret
2258 return ret
2259
2259
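# Example (sketch; node values are illustrative): bookmarks are one
# pushkey namespace, so moving bookmark 'foo' can be expressed as
#
#     repo.pushkey('bookmarks', 'foo', oldhexnode, newhexnode)
#
# which runs the 'prepushkey' and 'pushkey' hooks handled above.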
2260 def listkeys(self, namespace):
2260 def listkeys(self, namespace):
2261 self.hook('prelistkeys', throw=True, namespace=namespace)
2261 self.hook('prelistkeys', throw=True, namespace=namespace)
2262 self.ui.debug('listing keys for "%s"\n' % namespace)
2262 self.ui.debug('listing keys for "%s"\n' % namespace)
2263 values = pushkey.list(self, namespace)
2263 values = pushkey.list(self, namespace)
2264 self.hook('listkeys', namespace=namespace, values=values)
2264 self.hook('listkeys', namespace=namespace, values=values)
2265 return values
2265 return values
2266
2266
2267 def debugwireargs(self, one, two, three=None, four=None, five=None):
2267 def debugwireargs(self, one, two, three=None, four=None, five=None):
2268 '''used to test argument passing over the wire'''
2268 '''used to test argument passing over the wire'''
2269 return "%s %s %s %s %s" % (one, two, three, four, five)
2269 return "%s %s %s %s %s" % (one, two, three, four, five)
2270
2270
2271 def savecommitmessage(self, text):
2271 def savecommitmessage(self, text):
2272 fp = self.vfs('last-message.txt', 'wb')
2272 fp = self.vfs('last-message.txt', 'wb')
2273 try:
2273 try:
2274 fp.write(text)
2274 fp.write(text)
2275 finally:
2275 finally:
2276 fp.close()
2276 fp.close()
2277 return self.pathto(fp.name[len(self.root) + 1:])
2277 return self.pathto(fp.name[len(self.root) + 1:])
2278
2278
2279 # used to avoid circular references so destructors work
2279 # used to avoid circular references so destructors work
2280 def aftertrans(files):
2280 def aftertrans(files):
2281 renamefiles = [tuple(t) for t in files]
2281 renamefiles = [tuple(t) for t in files]
2282 def a():
2282 def a():
2283 for vfs, src, dest in renamefiles:
2283 for vfs, src, dest in renamefiles:
2284 # if src and dest refer to the same file, vfs.rename is a no-op,
2284 # if src and dest refer to the same file, vfs.rename is a no-op,
2285 # leaving both src and dest on disk. delete dest to make sure
2285 # leaving both src and dest on disk. delete dest to make sure
2286 # the rename is never such a no-op.
2286 # the rename is never such a no-op.
2287 vfs.tryunlink(dest)
2287 vfs.tryunlink(dest)
2288 try:
2288 try:
2289 vfs.rename(src, dest)
2289 vfs.rename(src, dest)
2290 except OSError: # journal file does not yet exist
2290 except OSError: # journal file does not yet exist
2291 pass
2291 pass
2292 return a
2292 return a
2293
2293
2294 def undoname(fn):
2294 def undoname(fn):
2295 base, name = os.path.split(fn)
2295 base, name = os.path.split(fn)
2296 assert name.startswith('journal')
2296 assert name.startswith('journal')
2297 return os.path.join(base, name.replace('journal', 'undo', 1))
2297 return os.path.join(base, name.replace('journal', 'undo', 1))
2298
2298
2299 def instance(ui, path, create):
2299 def instance(ui, path, create):
2300 return localrepository(ui, util.urllocalpath(path), create)
2300 return localrepository(ui, util.urllocalpath(path), create)
2301
2301
2302 def islocal(path):
2302 def islocal(path):
2303 return True
2303 return True
2304
2304
2305 def newreporequirements(repo):
2305 def newreporequirements(repo):
2306 """Determine the set of requirements for a new local repository.
2306 """Determine the set of requirements for a new local repository.
2307
2307
2308 Extensions can wrap this function to specify custom requirements for
2308 Extensions can wrap this function to specify custom requirements for
2309 new repositories.
2309 new repositories.
2310 """
2310 """
2311 ui = repo.ui
2311 ui = repo.ui
2312 requirements = {'revlogv1'}
2312 requirements = {'revlogv1'}
2313 if ui.configbool('format', 'usestore'):
2313 if ui.configbool('format', 'usestore'):
2314 requirements.add('store')
2314 requirements.add('store')
2315 if ui.configbool('format', 'usefncache'):
2315 if ui.configbool('format', 'usefncache'):
2316 requirements.add('fncache')
2316 requirements.add('fncache')
2317 if ui.configbool('format', 'dotencode'):
2317 if ui.configbool('format', 'dotencode'):
2318 requirements.add('dotencode')
2318 requirements.add('dotencode')
2319
2319
2320 compengine = ui.config('experimental', 'format.compression')
2320 compengine = ui.config('experimental', 'format.compression')
2321 if compengine not in util.compengines:
2321 if compengine not in util.compengines:
2322 raise error.Abort(_('compression engine %s defined by '
2322 raise error.Abort(_('compression engine %s defined by '
2323 'experimental.format.compression not available') %
2323 'experimental.format.compression not available') %
2324 compengine,
2324 compengine,
2325 hint=_('run "hg debuginstall" to list available '
2325 hint=_('run "hg debuginstall" to list available '
2326 'compression engines'))
2326 'compression engines'))
2327
2327
2328 # zlib is the historical default and doesn't need an explicit requirement.
2328 # zlib is the historical default and doesn't need an explicit requirement.
2329 if compengine != 'zlib':
2329 if compengine != 'zlib':
2330 requirements.add('exp-compression-%s' % compengine)
2330 requirements.add('exp-compression-%s' % compengine)
2331
2331
2332 if scmutil.gdinitconfig(ui):
2332 if scmutil.gdinitconfig(ui):
2333 requirements.add('generaldelta')
2333 requirements.add('generaldelta')
2334 if ui.configbool('experimental', 'treemanifest'):
2334 if ui.configbool('experimental', 'treemanifest'):
2335 requirements.add('treemanifest')
2335 requirements.add('treemanifest')
2336 if ui.configbool('experimental', 'manifestv2'):
2336 if ui.configbool('experimental', 'manifestv2'):
2337 requirements.add('manifestv2')
2337 requirements.add('manifestv2')
2338
2338
2339 revlogv2 = ui.config('experimental', 'revlogv2')
2339 revlogv2 = ui.config('experimental', 'revlogv2')
2340 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2340 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2341 requirements.remove('revlogv1')
2341 requirements.remove('revlogv1')
2342 # generaldelta is implied by revlogv2.
2342 # generaldelta is implied by revlogv2.
2343 requirements.discard('generaldelta')
2343 requirements.discard('generaldelta')
2344 requirements.add(REVLOGV2_REQUIREMENT)
2344 requirements.add(REVLOGV2_REQUIREMENT)
2345
2345
2346 return requirements
2346 return requirements
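# Example (editor's sketch; 'exp-myfeature' is an invented name): an
# extension adding its own requirement by wrapping this function:
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myfeature')
#         return reqs
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _newreporequirements)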
@@ -1,2349 +1,2350 @@
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import binascii
16 import binascii
17 import collections
17 import collections
18 import errno
18 import errno
19 import hashlib
19 import hashlib
20 import heapq
20 import heapq
21 import os
21 import os
22 import struct
22 import struct
23 import zlib
23 import zlib
24
24
25 # import stuff from node for others to import from revlog
25 # import stuff from node for others to import from revlog
26 from .node import (
26 from .node import (
27 bin,
27 bin,
28 hex,
28 hex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 wdirhex,
31 wdirhex,
32 wdirid,
32 wdirid,
33 wdirrev,
33 wdirrev,
34 )
34 )
35 from .i18n import _
35 from .i18n import _
36 from . import (
36 from . import (
37 ancestor,
37 ancestor,
38 error,
38 error,
39 mdiff,
39 mdiff,
40 policy,
40 policy,
41 pycompat,
41 pycompat,
42 templatefilters,
42 templatefilters,
43 util,
43 util,
44 )
44 )
45
45
46 parsers = policy.importmod(r'parsers')
46 parsers = policy.importmod(r'parsers')
47
47
48 # Aliased for performance.
48 # Aliased for performance.
49 _zlibdecompress = zlib.decompress
49 _zlibdecompress = zlib.decompress
50
50
51 # revlog header flags
51 # revlog header flags
52 REVLOGV0 = 0
52 REVLOGV0 = 0
53 REVLOGV1 = 1
53 REVLOGV1 = 1
54 # Dummy value until file format is finalized.
54 # Dummy value until file format is finalized.
55 # Reminder: change the bounds check in revlog.__init__ when this is changed.
55 # Reminder: change the bounds check in revlog.__init__ when this is changed.
56 REVLOGV2 = 0xDEAD
56 REVLOGV2 = 0xDEAD
57 FLAG_INLINE_DATA = (1 << 16)
57 FLAG_INLINE_DATA = (1 << 16)
58 FLAG_GENERALDELTA = (1 << 17)
58 FLAG_GENERALDELTA = (1 << 17)
59 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
59 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
60 REVLOG_DEFAULT_FORMAT = REVLOGV1
60 REVLOG_DEFAULT_FORMAT = REVLOGV1
61 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
61 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
62 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
62 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
63 REVLOGV2_FLAGS = REVLOGV1_FLAGS
63 REVLOGV2_FLAGS = REVLOGV1_FLAGS
64
64
65 # revlog index flags
65 # revlog index flags
66 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
66 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
67 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
67 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
68 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
68 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
69 REVIDX_DEFAULT_FLAGS = 0
69 REVIDX_DEFAULT_FLAGS = 0
70 # stable order in which flags need to be processed and their processors applied
70 # stable order in which flags need to be processed and their processors applied
71 REVIDX_FLAGS_ORDER = [
71 REVIDX_FLAGS_ORDER = [
72 REVIDX_ISCENSORED,
72 REVIDX_ISCENSORED,
73 REVIDX_ELLIPSIS,
73 REVIDX_ELLIPSIS,
74 REVIDX_EXTSTORED,
74 REVIDX_EXTSTORED,
75 ]
75 ]
76 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
76 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
77
77
78 # max size of revlog with inline data
78 # max size of revlog with inline data
79 _maxinline = 131072
79 _maxinline = 131072
80 _chunksize = 1048576
80 _chunksize = 1048576
81
81
82 RevlogError = error.RevlogError
82 RevlogError = error.RevlogError
83 LookupError = error.LookupError
83 LookupError = error.LookupError
84 CensoredNodeError = error.CensoredNodeError
84 CensoredNodeError = error.CensoredNodeError
85 ProgrammingError = error.ProgrammingError
85 ProgrammingError = error.ProgrammingError
86
86
87 # Store flag processors (cf. 'addflagprocessor()' to register)
87 # Store flag processors (cf. 'addflagprocessor()' to register)
88 _flagprocessors = {
88 _flagprocessors = {
89 REVIDX_ISCENSORED: None,
89 REVIDX_ISCENSORED: None,
90 }
90 }
91
91
92 def addflagprocessor(flag, processor):
92 def addflagprocessor(flag, processor):
93 """Register a flag processor on a revision data flag.
93 """Register a flag processor on a revision data flag.
94
94
95 Invariant:
95 Invariant:
96 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
96 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
97 - Only one flag processor can be registered on a specific flag.
97 - Only one flag processor can be registered on a specific flag.
98 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
98 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
99 following signatures:
99 following signatures:
100 - (read) f(self, rawtext) -> text, bool
100 - (read) f(self, rawtext) -> text, bool
101 - (write) f(self, text) -> rawtext, bool
101 - (write) f(self, text) -> rawtext, bool
102 - (raw) f(self, rawtext) -> bool
102 - (raw) f(self, rawtext) -> bool
103 "text" is presented to the user. "rawtext" is stored in revlog data, not
103 "text" is presented to the user. "rawtext" is stored in revlog data, not
104 directly visible to the user.
104 directly visible to the user.
105 The boolean returned by these transforms is used to determine whether
105 The boolean returned by these transforms is used to determine whether
106 the returned text can be used for hash integrity checking. For example,
106 the returned text can be used for hash integrity checking. For example,
107 if "write" returns False, then "text" is used to generate hash. If
107 if "write" returns False, then "text" is used to generate hash. If
108 "write" returns True, that basically means "rawtext" returned by "write"
108 "write" returns True, that basically means "rawtext" returned by "write"
109 should be used to generate hash. Usually, "write" and "read" return
109 should be used to generate hash. Usually, "write" and "read" return
110 different booleans. And "raw" returns a same boolean as "write".
110 different booleans. And "raw" returns a same boolean as "write".
111
111
112 Note: The 'raw' transform is used for changegroup generation and in some
112 Note: The 'raw' transform is used for changegroup generation and in some
113 debug commands. In this case the transform only indicates whether the
113 debug commands. In this case the transform only indicates whether the
114 contents can be used for hash integrity checks.
114 contents can be used for hash integrity checks.
115 """
115 """
116 if not flag & REVIDX_KNOWN_FLAGS:
116 if not flag & REVIDX_KNOWN_FLAGS:
117 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
117 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
118 raise ProgrammingError(msg)
118 raise ProgrammingError(msg)
119 if flag not in REVIDX_FLAGS_ORDER:
119 if flag not in REVIDX_FLAGS_ORDER:
120 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
120 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
121 raise ProgrammingError(msg)
121 raise ProgrammingError(msg)
122 if flag in _flagprocessors:
122 if flag in _flagprocessors:
123 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
123 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
124 raise error.Abort(msg)
124 raise error.Abort(msg)
125 _flagprocessors[flag] = processor
125 _flagprocessors[flag] = processor
126
126
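# Example (minimal sketch): a pass-through processor for the
# REVIDX_EXTSTORED flag defined above. Each transform returns a boolean
# telling whether its result may be used for hash integrity checks.
#
#     def _readext(rl, rawtext):
#         return rawtext, True
#     def _writeext(rl, text):
#         return text, True
#     def _rawext(rl, rawtext):
#         return True
#     addflagprocessor(REVIDX_EXTSTORED, (_readext, _writeext, _rawext))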
127 def getoffset(q):
127 def getoffset(q):
128 return int(q >> 16)
128 return int(q >> 16)
129
129
130 def gettype(q):
130 def gettype(q):
131 return int(q & 0xFFFF)
131 return int(q & 0xFFFF)
132
132
133 def offset_type(offset, type):
133 def offset_type(offset, type):
134 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
134 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
135 raise ValueError('unknown revlog index flags')
135 raise ValueError('unknown revlog index flags')
136 return int(int(offset) << 16 | type)
136 return int(int(offset) << 16 | type)
137
137
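# Example (sketch): an index entry packs the data offset and the flag
# bits into a single integer; getoffset/gettype recover the two halves.
#
#     q = offset_type(4096, REVIDX_ISCENSORED)
#     assert getoffset(q) == 4096
#     assert gettype(q) == REVIDX_ISCENSORED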
138 _nullhash = hashlib.sha1(nullid)
138 _nullhash = hashlib.sha1(nullid)
139
139
140 def hash(text, p1, p2):
140 def hash(text, p1, p2):
141 """generate a hash from the given text and its parent hashes
141 """generate a hash from the given text and its parent hashes
142
142
143 This hash combines both the current file contents and its history
143 This hash combines both the current file contents and its history
144 in a manner that makes it easy to distinguish nodes with the same
144 in a manner that makes it easy to distinguish nodes with the same
145 content in the revision graph.
145 content in the revision graph.
146 """
146 """
147 # As of now, if one of the parent nodes is null, p2 is null
147 # As of now, if one of the parent nodes is null, p2 is null
148 if p2 == nullid:
148 if p2 == nullid:
149 # a deep copy of a hash object is faster than creating a new one
149 # a deep copy of a hash object is faster than creating a new one
150 s = _nullhash.copy()
150 s = _nullhash.copy()
151 s.update(p1)
151 s.update(p1)
152 else:
152 else:
153 # none of the parent nodes are nullid
153 # none of the parent nodes are nullid
154 if p1 < p2:
154 if p1 < p2:
155 a = p1
155 a = p1
156 b = p2
156 b = p2
157 else:
157 else:
158 a = p2
158 a = p2
159 b = p1
159 b = p1
160 s = hashlib.sha1(a)
160 s = hashlib.sha1(a)
161 s.update(b)
161 s.update(b)
162 s.update(text)
162 s.update(text)
163 return s.digest()
163 return s.digest()
164
164
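# Worth noting (follows from the sorting above): the node id does not
# depend on parent order, so for any 20-byte ids p1 and p2:
#
#     hash(text, p1, p2) == hash(text, p2, p1)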
165 def _slicechunk(revlog, revs):
165 def _slicechunk(revlog, revs):
166 """slice revs to reduce the amount of unrelated data to be read from disk.
166 """slice revs to reduce the amount of unrelated data to be read from disk.
167
167
168 ``revs`` is sliced into groups that should be read in one go.
168 ``revs`` is sliced into groups that should be read in one go.
169 Assumes that revs are sorted.
169 Assumes that revs are sorted.
170 """
170 """
171 start = revlog.start
171 start = revlog.start
172 length = revlog.length
172 length = revlog.length
173
173
174 if len(revs) <= 1:
174 if len(revs) <= 1:
175 yield revs
175 yield revs
176 return
176 return
177
177
178 startbyte = start(revs[0])
178 startbyte = start(revs[0])
179 endbyte = start(revs[-1]) + length(revs[-1])
179 endbyte = start(revs[-1]) + length(revs[-1])
180 readdata = deltachainspan = endbyte - startbyte
180 readdata = deltachainspan = endbyte - startbyte
181
181
182 chainpayload = sum(length(r) for r in revs)
182 chainpayload = sum(length(r) for r in revs)
183
183
184 if deltachainspan:
184 if deltachainspan:
185 density = chainpayload / float(deltachainspan)
185 density = chainpayload / float(deltachainspan)
186 else:
186 else:
187 density = 1.0
187 density = 1.0
188
188
189 # Store the gaps in a heap to have them sorted by decreasing size
189 # Store the gaps in a heap to have them sorted by decreasing size
190 gapsheap = []
190 gapsheap = []
191 heapq.heapify(gapsheap)
191 heapq.heapify(gapsheap)
192 prevend = None
192 prevend = None
193 for i, rev in enumerate(revs):
193 for i, rev in enumerate(revs):
194 revstart = start(rev)
194 revstart = start(rev)
195 revlen = length(rev)
195 revlen = length(rev)
196
196
197 if prevend is not None:
197 if prevend is not None:
198 gapsize = revstart - prevend
198 gapsize = revstart - prevend
199 if gapsize:
199 # only consider holes that are large enough
200 if gapsize > revlog._srmingapsize:
200 heapq.heappush(gapsheap, (-gapsize, i))
201 heapq.heappush(gapsheap, (-gapsize, i))

        prevend = revstart + revlen

    # Collect the indices of the largest holes until the density is acceptable
    indicesheap = []
    heapq.heapify(indicesheap)
    while gapsheap and density < revlog._srdensitythreshold:
        oppgapsize, gapidx = heapq.heappop(gapsheap)

        heapq.heappush(indicesheap, gapidx)

        # the gap sizes are stored as negatives to be sorted decreasingly
        # by the heap
        readdata -= (-oppgapsize)
        if readdata > 0:
            density = chainpayload / float(readdata)
        else:
            density = 1.0

    # Cut the revs at collected indices
    previdx = 0
    while indicesheap:
        idx = heapq.heappop(indicesheap)
        yield revs[previdx:idx]
        previdx = idx
    yield revs[previdx:]

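# Illustrative sketch, not part of revlog: exercising _slicechunk with a
# tiny stand-in object. The offsets, lengths and thresholds are invented;
# only the ``start``/``length`` accessors and the two sparse-read knobs
# used above are assumed.
def _demoslicechunk():
    class fakerevlog(object):
        _srdensitythreshold = 0.5
        _srmingapsize = 10
        # rev -> (start offset, stored length); the hole between rev 1 and
        # rev 2 is 100 bytes wide, well above _srmingapsize
        _chunks = {0: (0, 10), 1: (10, 10), 2: (120, 10)}
        def start(self, rev):
            return self._chunks[rev][0]
        def length(self, rev):
            return self._chunks[rev][1]
    # density over bytes [0, 130) is 30/130 < 0.5, so the revs are split at
    # the large gap, yielding [[0, 1], [2]]
    return list(_slicechunk(fakerevlog(), [0, 1, 2]))
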
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack

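# Illustrative sketch, not part of revlog: a v0 index entry is a fixed
# 76-byte record (four 4-byte integers plus three 20-byte nodeids). The
# field values below are invented.
def _demov0entry():
    entry = indexformatv0_pack(0, 11, 0, 0,
                               b'\x11' * 20, b'\x00' * 20, b'\xaa' * 20)
    assert len(entry) == indexformatv0.size == 76
    # unpacking round-trips the seven fields
    return indexformatv0_unpack(entry)
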
class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_('index entry flags need revlog version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

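# Illustrative sketch, not part of revlog: the first field of a v1 entry
# multiplexes a 6-byte data offset with 2 bytes of flags, which is why the
# start()/flags() accessors further down shift and mask by 16 bits. Only
# the module-level offset_type helper is assumed.
def _demongentry():
    assert indexformatng.size == 64 # 8 + 6 * 4 + 20 + 12 padding bytes
    offsetflags = offset_type(65536, 0) # offset 65536, no flags
    assert offsetflags >> 16 == 65536 # what start() extracts
    assert offsetflags & 0xFFFF == 0 # what flags() extracts
    return offsetflags
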
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

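# Illustrative sketch, not part of revlog: the index file has no separate
# header, so packentry() above splices the version word over the first four
# bytes of the entry for rev 0. The entry values here are invented.
def _demoversionstamp():
    io = revlogio()
    entry = (offset_type(0, 0), 10, 10, 0, 0, -1, -1, b'\x11' * 20)
    p = io.packentry(entry, None, REVLOGV1 | FLAG_INLINE_DATA, 0)
    # the first entry now begins with the revlog version/flags word
    assert versionformat_unpack(p[:4])[0] == REVLOGV1 | FLAG_INLINE_DATA
    return p
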
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = False
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._srdensitythreshold = 0.25
        self._srmingapsize = 262144

        mmapindexthreshold = None
        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv2' in opts:
                # version 2 revlogs always use generaldelta.
                v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
            elif 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= FLAG_GENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'aggressivemergedeltas' in opts:
                self._aggressivemergedeltas = opts['aggressivemergedeltas']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
            if 'compengine' in opts:
                self._compengine = opts['compengine']
            if 'maxdeltachainspan' in opts:
                self._maxdeltachainspan = opts['maxdeltachainspan']
            if mmaplargeindex and 'mmapindexthreshold' in opts:
                mmapindexthreshold = opts['mmapindexthreshold']
            self._withsparseread = bool(opts.get('with-sparse-read', False))
            if 'sparse-read-density-threshold' in opts:
                self._srdensitythreshold = opts['sparse-read-density-threshold']
            if 'sparse-read-min-gap-size' in opts:
                self._srmingapsize = opts['sparse-read-min-gap-size']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                indexdata = util.buffer(util.mmapread(f))
            else:
                indexdata = f.read()
            f.close()
            if len(indexdata) > 0:
                v = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & FLAG_INLINE_DATA
        self._generaldelta = v & FLAG_GENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        else:
            raise RevlogError(_('unknown version (%d) in revlog %s') %
                              (fmt, self.indexfile))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def tip(self):
        return self.node(len(self.index) - 2)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)

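    # Illustrative note (not from the original docstring): revs() treats an
    # explicit ``stop`` as inclusive in both directions; for a revlog with
    # five revisions, revs() yields 0, 1, 2, 3, 4 while revs(3, 1) steps
    # backwards and yields 3, 2, 1.
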
    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            return self.index[rev][5:7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

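    # Illustrative note (not from the original source): with generaldelta the
    # pure-Python walk above follows each entry's recorded base in
    # index[rev][3]; without it the chain is simply rev, rev - 1, ... down to
    # the base. For example, assumed bases (0, 0, 1) for revs 0..2 make
    # _deltachain(2) return ([0, 1, 2], False).
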
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

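    # Illustrative note (not from the original source): the ``has`` value
    # returned above is the lazyset defined in findcommonmissing, so callers
    # can keep testing membership cheaply, e.g. (names are placeholders):
    #   has, missing = rl.findcommonmissing(commonnodes, headnodes)
    #   if somerev in has:
    #       ...  # consults the lazy ancestor iterator on demand
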
    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

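    # Illustrative note (not from the original source): for a linear history
    # of three revisions with parents (-1, -1), (0, -1) and (1, -1), the loop
    # above first marks each rev as a candidate head and then clears its
    # parents, leaving [2]; the extra slot in ``ishead`` absorbs the writes
    # for nullrev parents (index -1).
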
1033 def heads(self, start=None, stop=None):
1034 def heads(self, start=None, stop=None):
1034 """return the list of all nodes that have no children
1035 """return the list of all nodes that have no children
1035
1036
1036 if start is specified, only heads that are descendants of
1037 if start is specified, only heads that are descendants of
1037 start will be returned
1038 start will be returned
1038 if stop is specified, it will consider all the revs from stop
1039 if stop is specified, it will consider all the revs from stop
1039 as if they had no children
1040 as if they had no children
1040 """
1041 """
1041 if start is None and stop is None:
1042 if start is None and stop is None:
1042 if not len(self):
1043 if not len(self):
1043 return [nullid]
1044 return [nullid]
1044 return [self.node(r) for r in self.headrevs()]
1045 return [self.node(r) for r in self.headrevs()]
1045
1046
1046 if start is None:
1047 if start is None:
1047 start = nullid
1048 start = nullid
1048 if stop is None:
1049 if stop is None:
1049 stop = []
1050 stop = []
1050 stoprevs = set([self.rev(n) for n in stop])
1051 stoprevs = set([self.rev(n) for n in stop])
1051 startrev = self.rev(start)
1052 startrev = self.rev(start)
1052 reachable = {startrev}
1053 reachable = {startrev}
1053 heads = {startrev}
1054 heads = {startrev}
1054
1055
1055 parentrevs = self.parentrevs
1056 parentrevs = self.parentrevs
1056 for r in self.revs(start=startrev + 1):
1057 for r in self.revs(start=startrev + 1):
1057 for p in parentrevs(r):
1058 for p in parentrevs(r):
1058 if p in reachable:
1059 if p in reachable:
1059 if r not in stoprevs:
1060 if r not in stoprevs:
1060 reachable.add(r)
1061 reachable.add(r)
1061 heads.add(r)
1062 heads.add(r)
1062 if p in heads and p not in stoprevs:
1063 if p in heads and p not in stoprevs:
1063 heads.remove(p)
1064 heads.remove(p)
1064
1065
1065 return [self.node(r) for r in heads]
1066 return [self.node(r) for r in heads]
1066
1067
1067 def children(self, node):
1068 def children(self, node):
1068 """find the children of a given node"""
1069 """find the children of a given node"""
1069 c = []
1070 c = []
1070 p = self.rev(node)
1071 p = self.rev(node)
1071 for r in self.revs(start=p + 1):
1072 for r in self.revs(start=p + 1):
1072 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1073 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1073 if prevs:
1074 if prevs:
1074 for pr in prevs:
1075 for pr in prevs:
1075 if pr == p:
1076 if pr == p:
1076 c.append(self.node(r))
1077 c.append(self.node(r))
1077 elif p == nullrev:
1078 elif p == nullrev:
1078 c.append(self.node(r))
1079 c.append(self.node(r))
1079 return c
1080 return c
1080
1081
1081 def descendant(self, start, end):
1082 def descendant(self, start, end):
1082 if start == nullrev:
1083 if start == nullrev:
1083 return True
1084 return True
1084 for i in self.descendants([start]):
1085 for i in self.descendants([start]):
1085 if i == end:
1086 if i == end:
1086 return True
1087 return True
1087 elif i > end:
1088 elif i > end:
1088 break
1089 break
1089 return False
1090 return False
1090
1091
1091 def commonancestorsheads(self, a, b):
1092 def commonancestorsheads(self, a, b):
1092 """calculate all the heads of the common ancestors of nodes a and b"""
1093 """calculate all the heads of the common ancestors of nodes a and b"""
1093 a, b = self.rev(a), self.rev(b)
1094 a, b = self.rev(a), self.rev(b)
1094 try:
1095 try:
1095 ancs = self.index.commonancestorsheads(a, b)
1096 ancs = self.index.commonancestorsheads(a, b)
1096 except (AttributeError, OverflowError): # C implementation failed
1097 except (AttributeError, OverflowError): # C implementation failed
1097 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
1098 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
1098 return pycompat.maplist(self.node, ancs)
1099 return pycompat.maplist(self.node, ancs)
1099
1100
    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

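The str(rev) branch above is stricter than a plain int() call. A hedged standalone approximation of just that rule (matchrevnum is a hypothetical name; the real method also tries the binary and full-hex forms):

    def matchrevnum(id, nrevs):
        try:
            rev = int(id)
            if str(rev) != id:        # rejects '0x1f', ' 5', '05', ...
                raise ValueError
            if rev < 0:
                rev = nrevs + rev     # -1 means the tip
            if rev < 0 or rev >= nrevs:
                raise ValueError
            return rev
        except (ValueError, OverflowError):
            return None

    assert matchrevnum('10', 20) == 10
    assert matchrevnum('-1', 20) == 19
    assert matchrevnum('05', 20) is None   # stays available as a hex prefix
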
    def _partialmatch(self, id):
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except (TypeError, binascii.Error):
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def shortest(self, hexnode, minlength=1):
        """Find the shortest unambiguous prefix that matches hexnode."""
        def isvalid(test):
            try:
                if self._partialmatch(test) is None:
                    return False

                try:
                    i = int(test)
                    # a prefix that parses as a pure int could be confused
                    # with a revision number; it is only usable if it starts
                    # with '0' (revision numbers never do) or is larger than
                    # the tip rev
                    if test[0] == '0' or i > len(self):
                        return True
                    return False
                except ValueError:
                    return True
            except error.RevlogError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True

        shortest = hexnode
        startlength = max(6, minlength)
        length = startlength
        while True:
            test = hexnode[:length]
            if isvalid(test):
                shortest = test
                if length == minlength or length > startlength:
                    return shortest
                length -= 1
            else:
                length += 1
                if len(shortest) <= length:
                    return shortest

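The expand/contract walk is easier to see without the revlog plumbing. A simplified sketch over a plain set of hex ids, assuming no wdir or revision-number ambiguity (toyshortest is hypothetical):

    def toyshortest(hexnode, allhex, minlength=1):
        def isvalid(test):
            return sum(1 for h in allhex if h.startswith(test)) == 1

        shortest = hexnode
        startlength = max(6, minlength)
        length = startlength
        while True:
            test = hexnode[:length]
            if isvalid(test):
                shortest = test
                if length == minlength or length > startlength:
                    return shortest
                length -= 1
            else:
                length += 1
                if len(shortest) <= length:
                    return shortest

    nodes = {'abc123' + '0' * 34, 'abd456' + '0' * 34}
    print(toyshortest(next(iter(nodes)), nodes))   # 'abc' or 'abd'
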
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, its seek position will be
        moved and will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d

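The window arithmetic relies on the chunk cache size being a power of two. A worked example, assuming the common 64 KiB setting:

    cachesize = 65536                 # assuming a 64 KiB cache window
    offset, length = 70000, 100

    realoffset = offset & ~(cachesize - 1)
    reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                  - realoffset)
    assert (realoffset, reallength) == (65536, 65536)
    # the read covers [65536, 131072); the caller's 100 bytes sit at
    # offset - realoffset == 4464 inside the cached window
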
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

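The inline adjustment exists because an inline revlog interleaves index entries and revision data in one file: the data for revision r is preceded by the index entries for revisions 0..r. A toy computation, assuming the 64-byte v1 entry size:

    iosize = 64                         # assumed v1 index entry size
    startrev, endrev = 2, 3
    start, end = 300, 500               # data offsets recorded in the index

    start += (startrev + 1) * iosize    # 300 + 192 = 492
    end += (endrev + 1) * iosize        # 500 + 256 = 756
    length = end - start                # 264
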
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = _slicechunk(self, revs)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

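The ``_withsparseread`` branch is the feature this changeset tunes: instead of one read spanning the whole delta chain, the chain is sliced into groups so that large unread gaps are skipped, while gaps too small to be worth a separate syscall are read through. A much-simplified sketch of the idea (sliceongaps and mingap are hypothetical; the real _slicechunk uses a density heuristic):

    def sliceongaps(revs, start, end, mingap):
        """revs is sorted; start(rev)/end(rev) give on-disk byte positions."""
        group = [revs[0]]
        for prev, rev in zip(revs, revs[1:]):
            if start(rev) - end(prev) > mingap:
                yield group          # gap is worth skipping: new read group
                group = []
            group.append(rev)
        yield group

    # e.g. with revisions laid out at [0,10), [10,20), [500,510):
    pos = {0: (0, 10), 1: (10, 20), 2: (500, 510)}
    groups = list(sliceongaps([0, 1, 2],
                              start=lambda r: pos[r][0],
                              end=lambda r: pos[r][1],
                              mingap=64))
    assert groups == [[0, 1], [2]]
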
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            bins = self._chunks(chain, df=_df)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

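Stripped of caching and flag handling, the reconstruction path above is: fetch the chain's base fulltext plus every delta along the chain, then fold the deltas in oldest-first. A toy illustration with (start, end, replacement) deltas standing in for mdiff's binary format:

    def applydelta(text, delta):
        start, end, new = delta
        return text[:start] + new + text[end:]

    base = b'abcdef'                       # analogous to bytes(bins[0])
    deltas = [(2, 4, b'XY'), (0, 1, b'Z')] # analogous to bins[1:]
    text = base
    for d in deltas:                       # analogous to mdiff.patches
        text = applydelta(text, d)
    assert text == b'ZbXYef'
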
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

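For reference, the module-level hash() this delegates to combines the two parent nodes with the text, sorting the parents first so parent order does not change the id. A standalone equivalent, to the best of my reading of the format:

    import hashlib

    def nodehash(text, p1, p2):
        a, b = sorted((p1, p2))      # parent order must not matter
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()            # the 20-byte binary node

    assert nodehash(b't', b'\x01' * 20, b'\x02' * 20) == \
           nodehash(b't', b'\x02' * 20, b'\x01' * 20)
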
    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER must be stable because the transforms are not
        commutative.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

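A toy flag processor, assuming the (readtransform, writetransform, rawtransform) tuple shape unpacked above; the flag bit and functions here are purely illustrative, and real processors are registered through the revlog module rather than by building this dict directly:

    TOYFLAG = 1 << 0      # hypothetical flag bit for illustration

    def toyread(rl, text):
        return text.replace(b'\\n', b'\n'), True    # (text, validatehash)

    def toywrite(rl, text):
        return text.replace(b'\n', b'\\n'), True    # must invert toyread

    def toyraw(rl, text):
        return False      # raw chunks of this flag cannot be hash-checked

    # registered shape: flag -> (readtransform, writetransform, rawtransform)
    processors = {TOYFLAG: (toyread, toywrite, toyraw)}
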
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, pycompat.bytestr(revornode)))

    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True,
                         checkambig=self._checkambig)
        self.version &= ~FLAG_INLINE_DATA
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2); however, subclasses might
        use a different hashing method (and override checkhash() in that case)
        flags - the known flags to set on the revision
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta)

    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None):
        """add a raw revision with known flags, node and parents

        useful when reusing a revision not stored in this revlog (e.g. received
        over the wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a+")
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

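A hedged sketch of the header convention above: compression is kept only when the engine says it is worthwhile (a falsy result here stands in for that signal), otherwise data is stored verbatim, with 'u' prepended only when the first byte could be mistaken for a header:

    def toycompress(data, engine):
        if not data:
            return '', data
        compressed = engine(data)       # falsy: not worth compressing
        if compressed:
            return '', compressed       # engine output carries its own header
        if data[0:1] == b'\0':
            return '', data             # leading NUL already means "raw"
        return 'u', data                # explicit "uncompressed" marker
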
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') % str(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

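The dispatch is easy to demo standalone, since zlib streams happen to start with the byte 'x' (0x78) that the fast path keys on. A minimal sketch covering the three inline cases only:

    import zlib

    def toydecompress(data):
        t = data[0:1]
        if t == b'x':
            return zlib.decompress(data)   # zlib streams start with 0x78
        if t == b'\0':
            return data                    # raw data, returned as-is
        if t == b'u':
            return data[1:]                # raw data, marker stripped
        raise KeyError(t)                  # real code asks the compengines

    blob = zlib.compress(b'hello')
    assert blob[0:1] == b'x'
    assert toydecompress(blob) == b'hello'
    assert toydecompress(b'uhello') == b'hello'
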
    def _isgooddelta(self, d, textlen):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if d is None:
            return False

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        dist, l, data, base, chainbase, chainlen, compresseddeltalen = d

        defaultmax = textlen * 4
        maxdist = self._maxdeltachainspan
        if not maxdist:
            maxdist = dist # ensure the conditional passes
        maxdist = max(maxdist, defaultmax)
        if (dist > maxdist or l > textlen or
            compresseddeltalen > textlen * 2 or
            (self._maxchainlen and chainlen > self._maxchainlen)):
            return False

        return True

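Worked numbers for the bounds above, assuming a 1000-byte incoming fulltext and the chain-limit knobs left at values that do not constrain further:

    textlen = 1000                  # fulltext size of the incoming revision
    defaultmax = textlen * 4        # the chain may span at most 4000 bytes
    # a delta is then rejected when any of these hold:
    #   dist > 4000                 (too much on-disk span to reach the base)
    #   l > 1000                    (the delta outweighs the fulltext itself)
    #   compresseddeltalen > 2000   (chain costs over 2x the text to apply)
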
1819 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1820 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1820 cachedelta, ifh, dfh, alwayscache=False):
1821 cachedelta, ifh, dfh, alwayscache=False):
1821 """internal function to add revisions to the log
1822 """internal function to add revisions to the log
1822
1823
1823 see addrevision for argument descriptions.
1824 see addrevision for argument descriptions.
1824
1825
1825 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1826 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1826
1827
1827 invariants:
1828 invariants:
1828 - rawtext is optional (can be None); if not set, cachedelta must be set.
1829 - rawtext is optional (can be None); if not set, cachedelta must be set.
1829 if both are set, they must correspond to each other.
1830 if both are set, they must correspond to each other.
1830 """
1831 """
1831 if node == nullid:
1832 if node == nullid:
1832 raise RevlogError(_("%s: attempt to add null revision") %
1833 raise RevlogError(_("%s: attempt to add null revision") %
1833 (self.indexfile))
1834 (self.indexfile))
1834 if node == wdirid:
1835 if node == wdirid:
1835 raise RevlogError(_("%s: attempt to add wdir revision") %
1836 raise RevlogError(_("%s: attempt to add wdir revision") %
1836 (self.indexfile))
1837 (self.indexfile))
1837
1838
1838 btext = [rawtext]
1839 btext = [rawtext]
1839 def buildtext():
1840 def buildtext():
1840 if btext[0] is not None:
1841 if btext[0] is not None:
1841 return btext[0]
1842 return btext[0]
1842 baserev = cachedelta[0]
1843 baserev = cachedelta[0]
1843 delta = cachedelta[1]
1844 delta = cachedelta[1]
1844 # special case deltas which replace entire base; no need to decode
1845 # special case deltas which replace entire base; no need to decode
1845 # base revision. this neatly avoids censored bases, which throw when
1846 # base revision. this neatly avoids censored bases, which throw when
1846 # they're decoded.
1847 # they're decoded.
1847 hlen = struct.calcsize(">lll")
1848 hlen = struct.calcsize(">lll")
1848 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1849 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1849 len(delta) - hlen):
1850 len(delta) - hlen):
1850 btext[0] = delta[hlen:]
1851 btext[0] = delta[hlen:]
1851 else:
1852 else:
1852 if self._inline:
1853 if self._inline:
1853 fh = ifh
1854 fh = ifh
1854 else:
1855 else:
1855 fh = dfh
1856 fh = dfh
1856 basetext = self.revision(baserev, _df=fh, raw=True)
1857 basetext = self.revision(baserev, _df=fh, raw=True)
1857 btext[0] = mdiff.patch(basetext, delta)
1858 btext[0] = mdiff.patch(basetext, delta)
1858
1859
1859 try:
1860 try:
1860 res = self._processflags(btext[0], flags, 'read', raw=True)
1861 res = self._processflags(btext[0], flags, 'read', raw=True)
1861 btext[0], validatehash = res
1862 btext[0], validatehash = res
1862 if validatehash:
1863 if validatehash:
1863 self.checkhash(btext[0], node, p1=p1, p2=p2)
1864 self.checkhash(btext[0], node, p1=p1, p2=p2)
1864 if flags & REVIDX_ISCENSORED:
1865 if flags & REVIDX_ISCENSORED:
1865 raise RevlogError(_('node %s is not censored') % node)
1866 raise RevlogError(_('node %s is not censored') % node)
1866 except CensoredNodeError:
1867 except CensoredNodeError:
1867 # must pass the censored index flag to add censored revisions
1868 # must pass the censored index flag to add censored revisions
1868 if not flags & REVIDX_ISCENSORED:
1869 if not flags & REVIDX_ISCENSORED:
1869 raise
1870 raise
1870 return btext[0]
1871 return btext[0]
1871
1872
1872 def builddelta(rev):
1873 def builddelta(rev):
1873 # can we use the cached delta?
1874 # can we use the cached delta?
1874 if cachedelta and cachedelta[0] == rev:
1875 if cachedelta and cachedelta[0] == rev:
1875 delta = cachedelta[1]
1876 delta = cachedelta[1]
1876 else:
1877 else:
1877 t = buildtext()
1878 t = buildtext()
1878 if self.iscensored(rev):
1879 if self.iscensored(rev):
1879 # deltas based on a censored revision must replace the
1880 # deltas based on a censored revision must replace the
1880 # full content in one patch, so delta works everywhere
1881 # full content in one patch, so delta works everywhere
1881 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1882 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1882 delta = header + t
1883 delta = header + t
1883 else:
1884 else:
1884 if self._inline:
1885 if self._inline:
1885 fh = ifh
1886 fh = ifh
1886 else:
1887 else:
1887 fh = dfh
1888 fh = dfh
1888 ptext = self.revision(rev, _df=fh, raw=True)
1889 ptext = self.revision(rev, _df=fh, raw=True)
1889 delta = mdiff.textdiff(ptext, t)
1890 delta = mdiff.textdiff(ptext, t)
1890 header, data = self.compress(delta)
1891 header, data = self.compress(delta)
1891 deltalen = len(header) + len(data)
1892 deltalen = len(header) + len(data)
1892 chainbase = self.chainbase(rev)
1893 chainbase = self.chainbase(rev)
1893 dist = deltalen + offset - self.start(chainbase)
1894 dist = deltalen + offset - self.start(chainbase)
1894 if self._generaldelta:
1895 if self._generaldelta:
1895 base = rev
1896 base = rev
1896 else:
1897 else:
1897 base = chainbase
1898 base = chainbase
1898 chainlen, compresseddeltalen = self._chaininfo(rev)
1899 chainlen, compresseddeltalen = self._chaininfo(rev)
1899 chainlen += 1
1900 chainlen += 1
1900 compresseddeltalen += deltalen
1901 compresseddeltalen += deltalen
1901 return (dist, deltalen, (header, data), base,
1902 return (dist, deltalen, (header, data), base,
1902 chainbase, chainlen, compresseddeltalen)
1903 chainbase, chainlen, compresseddeltalen)
1903
1904
1904 curr = len(self)
1905 curr = len(self)
1905 prev = curr - 1
1906 prev = curr - 1
1906 offset = self.end(prev)
1907 offset = self.end(prev)
1907 delta = None
1908 delta = None
1908 p1r, p2r = self.rev(p1), self.rev(p2)
1909 p1r, p2r = self.rev(p1), self.rev(p2)
1909
1910
1910 # full versions are inserted when the needed deltas
1911 # full versions are inserted when the needed deltas
1911 # become comparable to the uncompressed text
1912 # become comparable to the uncompressed text
1912 if rawtext is None:
1913 if rawtext is None:
1913 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1914 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1914 cachedelta[1])
1915 cachedelta[1])
1915 else:
1916 else:
1916 textlen = len(rawtext)
1917 textlen = len(rawtext)
1917
1918
1918 # should we try to build a delta?
1919 # should we try to build a delta?
1919 if prev != nullrev and self.storedeltachains:
1920 if prev != nullrev and self.storedeltachains:
1920 tested = set()
1921 tested = set()
1921 # This condition is true most of the time when processing
1922 # This condition is true most of the time when processing
1922 # changegroup data into a generaldelta repo. The only time it
1923 # changegroup data into a generaldelta repo. The only time it
1923 # isn't true is if this is the first revision in a delta chain
1924 # isn't true is if this is the first revision in a delta chain
        # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
        if cachedelta and self._generaldelta and self._lazydeltabase:
            # Assume what we received from the server is a good choice;
            # builddelta will reuse the cache.
            candidatedelta = builddelta(cachedelta[0])
            tested.add(cachedelta[0])
            if self._isgooddelta(candidatedelta, textlen):
                delta = candidatedelta
        if delta is None and self._generaldelta:
            # exclude any base already tested by the lazy-delta path above
            parents = [p for p in (p1r, p2r)
                       if p != nullrev and p not in tested]
            if parents and not self._aggressivemergedeltas:
                # Pick whichever parent is closer to us (to minimize the
                # chance of having to build a fulltext).
                parents = [max(parents)]
            tested.update(parents)
            pdeltas = []
            for p in parents:
                pd = builddelta(p)
                if self._isgooddelta(pd, textlen):
                    pdeltas.append(pd)
            if pdeltas:
                delta = min(pdeltas, key=lambda x: x[1])
        if delta is None and prev not in tested:
            # other approaches failed; try against prev in the hope of
            # saving us a fulltext.
            candidatedelta = builddelta(prev)
            if self._isgooddelta(candidatedelta, textlen):
                delta = candidatedelta
        if delta is not None:
            dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
        else:
            rawtext = buildtext()
            data = self.compress(rawtext)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if alwayscache and rawtext is None:
            rawtext = buildtext()

        if type(rawtext) == str: # only accept immutable objects
            self._cache = (node, curr, rawtext)
        self._chainbasecache[curr] = chainbase
        return node

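# A minimal standalone sketch (not part of revlog.py) of the candidate
# ordering implemented above: try the server-supplied cached delta first,
# then the closest parent(s), then the previous revision, and fall back to
# a fulltext. ``candidates``, ``builddelta`` and ``isgood`` stand in for
# the closures used by _addrevision(); everything here is hypothetical.
def _pickdeltabase_sketch(candidates, builddelta, isgood):
    tested = set()
    for base in candidates:
        if base is None or base in tested:
            continue  # skip bases we already rejected
        tested.add(base)
        delta = builddelta(base)
        if isgood(delta):
            return base, delta
    return None, None  # caller compresses and stores a fulltext instead

# e.g. _pickdeltabase_sketch([cachedbase, p1, prev], builddelta, isgood)
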
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

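# A minimal sketch (not part of revlog.py) of the a+ workaround described
# above: after reading from an append-mode handle, seek to EOF explicitly
# before writing, so platforms with buggy a+ handling still append. The
# path and payload are hypothetical.
import os

def appendsafely(path, payload):
    with open(path, 'a+b') as fh:
        fh.seek(0)
        head = fh.read(4)        # reading moves the file position...
        fh.seek(0, os.SEEK_END)  # ...so reposition at EOF before writing
        fh.write(payload)
        return head
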
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The first
        delta is against its parent, which should be in our log; the rest
        are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb))

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+",
                                      checkambig=self._checkambig)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes

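# A hedged sketch (not part of revlog.py) of the 7-tuples addgroup()
# unpacks per revision, plus a trivial linkmapper. The 20-byte node values
# are fake placeholders, purely illustrative.
def _deltagroup_sketch():
    node = b'\x01' * 20
    parent = b'\x00' * 20        # all-zero id plays the role of nullid
    deltas = [
        # (node, p1, p2, linknode, deltabase, delta, flags)
        (node, parent, parent, node, parent, b'', 0),
    ]
    def linkmapper(linknode):
        return 0                 # map a changelog node to its local rev
    return deltas, linkmapper
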
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return False

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        return False

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

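# A toy illustration (not part of revlog.py): on a *linear* history with
# monotonic linkrevs, the walk above degenerates to "truncate at the first
# rev whose linkrev is >= minlink", and no linkrevs are broken. ``linkrevs``
# is a hypothetical list indexed by rev.
def _linearstrippoint(linkrevs, minlink):
    for rev, linkrev in enumerate(linkrevs):
        if linkrev >= minlink:
            return rev
    return len(linkrevs)

assert _linearstrippoint([0, 1, 2, 3], minlink=2) == 2
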
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

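# A small sketch (not part of revlog.py) of the truncation offsets computed
# above. With a separate .d file the index is cut at rev * entrysize and the
# data file at the first stripped chunk's start; inline revlogs interleave
# index entries and chunk data, so the cut point is that data offset plus
# rev * entrysize. The 64-byte entry size assumes revlog version 1; all
# values here are hypothetical.
def _truncationoffsets(rev, datastart, entrysize=64, inline=False):
    if not inline:
        return {'index': rev * entrysize, 'data': datastart}
    return {'index': datastart + rev * entrysize}

assert _truncationoffsets(3, datastart=1000) == {'index': 192, 'data': 1000}
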
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

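# A minimal sketch (not part of revlog.py) of the two figures checksize()
# returns for a non-inline revlog: ``dd`` is the surplus (or, if negative,
# missing) bytes in the .d file versus what the index expects, and ``di``
# the partial entry left at the end of the .i file. (0, 0) means the files
# look consistent. Entry size again assumes revlog v1; sizes hypothetical.
def _checksize_sketch(datasize, indexsize, expecteddata, entrysize=64):
    dd = datasize - expecteddata
    di = indexsize - (indexsize // entrysize) * entrysize
    return dd, di

assert _checksize_sketch(1024, 128, expecteddata=1024) == (0, 0)
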
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEALL = {'always', 'samerevs', 'never'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``aggressivemergedeltas`` argument
        controls whether to compute deltas against both parents for merges.
        If ``None``, the destination revlog's existing setting is used.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._aggressivemergedeltas

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, str(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
                                            flags, cachedelta, ifh, dfh)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._aggressivemergedeltas = oldamd
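
# A hedged usage sketch (not part of revlog.py): re-deltifying a revlog by
# cloning it into an empty destination with delta reuse disabled. ``srclog``,
# ``dstlog`` and ``tr`` are assumed to be an existing revlog, an empty
# destination revlog and an open transaction; none are defined here.
def _recomputedeltas_sketch(srclog, dstlog, tr):
    def progress(log, rev, node):
        pass  # e.g. update a progress bar per copied revision
    srclog.clone(tr, dstlog, addrevisioncb=progress,
                 deltareuse=srclog.DELTAREUSENEVER)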