configitems: register the 'experimental.mmapindexthreshold' config
Boris Feld
r34521:ca5b833c default
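
This changeset only declares the new knob: 'experimental.mmapindexthreshold' is a byte-size threshold above which a revlog index may be memory-mapped rather than read outright, and the code that actually consumes it lands separately in this series. As annotation (not part of the changeset), here is a minimal standalone sketch of the gating idea using only the stdlib; readindex() and its wiring are hypothetical:

import mmap
import os

def readindex(path, mmapthreshold=None):
    """Return index data, mmapping the file once it exceeds the threshold.

    mmapthreshold mirrors experimental.mmapindexthreshold: None (the
    registered default) keeps the plain read() path, while an integer
    number of bytes switches large files to a read-only memory map.
    """
    size = os.path.getsize(path)
    with open(path, 'rb') as fp:
        if mmapthreshold is not None and size >= mmapthreshold:
            # Pages are faulted in lazily instead of copied up front;
            # the mapping stays valid after the file object is closed.
            return mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
        return fp.read()
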
--- a/mercurial/configitems.py
+++ b/mercurial/configitems.py
@@ -1,704 +1,707 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in configtable.items():
        knownitems = ui._knownconfig.setdefault(section, {})
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)
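
(Annotation, not part of the file: loadconfigtable() above is what merges an extension's declared items into ui._knownconfig, emitting a develwarn when an extension stomps on an already-known item. Below is a minimal extension-side sketch of such a table, using the registrar helper that contemporary extensions rely on; the extension name 'myext' and its item are hypothetical.)

from mercurial import registrar

configtable = {}
configitem = registrar.configitem(configtable)

configitem('myext', 'threshold',
    default=0,
)

def reposetup(ui, repo):
    # Once the table is loaded, lookups fall back to the registered
    # default when the user has not set the item in any hgrc.
    ui.debug('myext.threshold=%d\n' % ui.configint('myext', 'threshold'))
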
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives.
    """

    def __init__(self, section, name, default=None, alias=()):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)

coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, {})
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for the case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    return functools.partial(_register, configtable)

coreconfigitem = getitemregister(coreitems)
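
(Annotation, not part of the file: the registry above is plain partial application, with functools.partial binding a table to _register, while dynamicdefault is an identity-compared sentinel distinguishing "default computed elsewhere" from a literal None. A standalone toy, with all names hypothetical, demonstrating both tricks:)

import functools

def _toyregister(table, section, name, default=None):
    items = table.setdefault(section, {})
    if name in items:
        raise ValueError("duplicated config item registration for "
                         "'%s.%s'" % (section, name))
    items[name] = default

toysentinel = object()
toyitems = {}
toyconfigitem = functools.partial(_toyregister, toyitems)

toyconfigitem('pager', 'attend', default=toysentinel)
# Identity comparison tells a consumer to derive the default itself:
assert toyitems['pager']['attend'] is toysentinel
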

coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('experimental', 'allowdivergence',
    default=False,
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
+coreconfigitem('experimental', 'mmapindexthreshold',
+    default=None,
+)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'stabilization',
    default=list,
    alias=[('experimental', 'evolution')],
)
coreconfigitem('experimental', 'stabilization.bundle-obsmarker',
    default=False,
    alias=[('experimental', 'evolution.bundle-obsmarker')],
)
coreconfigitem('experimental', 'stabilization.track-operation',
    default=True,
    alias=[('experimental', 'evolution.track-operation')]
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'format.compression',
    default='zlib',
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
coreconfigitem('experimental', 'manifestv2',
    default=False,
)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'rebase.multidest',
    default=False,
)
coreconfigitem('experimental', 'revertalternateinteractivemode',
    default=True,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'spacemovesdown',
    default=False,
)
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'updatecheck',
    default=None,
)
coreconfigitem('format', 'aggressivemergedeltas',
    default=False,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
)
coreconfigitem('format', 'maxchainlen',
    default=None,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default=dynamicdefault,
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'http2debuglevel',
    default=None,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'statuscopies',
    default=False,
)
coreconfigitem('ui', 'strict',
    default=False,
)
coreconfigitem('ui', 'style',
    default='',
)
coreconfigitem('ui', 'supportcontact',
    default=None,
)
coreconfigitem('ui', 'textwidth',
    default=78,
)
coreconfigitem('ui', 'timeout',
    default='600',
)
coreconfigitem('ui', 'traceback',
    default=False,
)
coreconfigitem('ui', 'tweakdefaults',
    default=False,
)
coreconfigitem('ui', 'usehttp2',
    default=False,
)
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
coreconfigitem('ui', 'verbose',
    default=False,
)
coreconfigitem('verify', 'skipflags',
    default=None,
)
coreconfigitem('web', 'accesslog',
    default='-',
)
coreconfigitem('web', 'address',
    default='',
)
coreconfigitem('web', 'allow_archive',
    default=list,
)
coreconfigitem('web', 'allow_read',
    default=list,
)
coreconfigitem('web', 'baseurl',
    default=None,
)
coreconfigitem('web', 'cacerts',
    default=None,
)
coreconfigitem('web', 'certificate',
    default=None,
)
coreconfigitem('web', 'collapse',
    default=False,
)
coreconfigitem('web', 'csp',
    default=None,
)
coreconfigitem('web', 'deny_read',
    default=list,
)
coreconfigitem('web', 'descend',
    default=True,
)
coreconfigitem('web', 'description',
    default="",
)
coreconfigitem('web', 'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem('web', 'errorlog',
    default='-',
)
coreconfigitem('web', 'ipv6',
    default=False,
)
coreconfigitem('web', 'port',
    default=8000,
)
coreconfigitem('web', 'prefix',
    default='',
)
coreconfigitem('web', 'refreshinterval',
    default=20,
)
coreconfigitem('web', 'stripes',
    default=1,
)
coreconfigitem('web', 'style',
    default='paper',
)
coreconfigitem('web', 'templates',
    default=None,
)
coreconfigitem('worker', 'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem('worker', 'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem('worker', 'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem('worker', 'backgroundclosethreadcount',
    default=4,
)
coreconfigitem('worker', 'numcpus',
    default=None,
)
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2302 +1,2302 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
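
(Annotation, not part of the file: the filecache classes above redirect every descriptor operation to repo.unfiltered(), so all filtered views of a repository share a single cache entry. A toy descriptor, with hypothetical names, showing that redirection pattern:)

class sharedcache(object):
    """Descriptor caching a computed value on one canonical object."""
    def __init__(self, compute):
        self.compute = compute
        self.name = compute.__name__

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        canonical = obj.unfiltered()  # mirrors the repo.unfiltered() hop
        cache = canonical.__dict__.setdefault('_toycache', {})
        if self.name not in cache:
            cache[self.name] = self.compute(canonical)
        return cache[self.name]

class toyrepo(object):
    def unfiltered(self):
        return self  # a filtered view would return its base repo here

    @sharedcache
    def expensive(self):
        return 42  # stand-in for a computation worth caching
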
119 class unfilteredpropertycache(util.propertycache):
119 class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""
121
121
122 def __get__(self, repo, type=None):
122 def __get__(self, repo, type=None):
123 unfi = repo.unfiltered()
123 unfi = repo.unfiltered()
124 if unfi is repo:
124 if unfi is repo:
125 return super(unfilteredpropertycache, self).__get__(unfi)
125 return super(unfilteredpropertycache, self).__get__(unfi)
126 return getattr(unfi, self.name)
126 return getattr(unfi, self.name)
127
127
128 class filteredpropertycache(util.propertycache):
128 class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""
130
130
131 def cachevalue(self, obj, value):
131 def cachevalue(self, obj, value):
132 object.__setattr__(obj, self.name, value)
132 object.__setattr__(obj, self.name, value)
133
133
134
134
135 def hasunfilteredcache(repo, name):
135 def hasunfilteredcache(repo, name):
136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 return name in vars(repo.unfiltered())
137 return name in vars(repo.unfiltered())
138
138
139 def unfilteredmethod(orig):
139 def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
141 def wrapper(repo, *args, **kwargs):
141 def wrapper(repo, *args, **kwargs):
142 return orig(repo.unfiltered(), *args, **kwargs)
142 return orig(repo.unfiltered(), *args, **kwargs)
143 return wrapper
143 return wrapper
144
144
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 'unbundle'}
146 'unbundle'}
147 legacycaps = moderncaps.union({'changegroupsubset'})
147 legacycaps = moderncaps.union({'changegroupsubset'})
148
148
149 class localpeer(repository.peer):
149 class localpeer(repository.peer):
150 '''peer for a local repo; reflects only the most recent API'''
150 '''peer for a local repo; reflects only the most recent API'''
151
151
152 def __init__(self, repo, caps=None):
152 def __init__(self, repo, caps=None):
153 super(localpeer, self).__init__()
153 super(localpeer, self).__init__()
154
154
155 if caps is None:
155 if caps is None:
156 caps = moderncaps.copy()
156 caps = moderncaps.copy()
157 self._repo = repo.filtered('served')
157 self._repo = repo.filtered('served')
158 self._ui = repo.ui
158 self._ui = repo.ui
159 self._caps = repo._restrictcapabilities(caps)
159 self._caps = repo._restrictcapabilities(caps)
160
160
161 # Begin of _basepeer interface.
161 # Begin of _basepeer interface.
162
162
163 @util.propertycache
163 @util.propertycache
164 def ui(self):
164 def ui(self):
165 return self._ui
165 return self._ui
166
166
167 def url(self):
167 def url(self):
168 return self._repo.url()
168 return self._repo.url()
169
169
170 def local(self):
170 def local(self):
171 return self._repo
171 return self._repo
172
172
173 def peer(self):
173 def peer(self):
174 return self
174 return self
175
175
176 def canpush(self):
176 def canpush(self):
177 return True
177 return True
178
178
179 def close(self):
179 def close(self):
180 self._repo.close()
180 self._repo.close()
181
181
182 # End of _basepeer interface.
182 # End of _basepeer interface.
183
183
184 # Begin of _basewirecommands interface.
184 # Begin of _basewirecommands interface.
185
185
186 def branchmap(self):
186 def branchmap(self):
187 return self._repo.branchmap()
187 return self._repo.branchmap()
188
188
189 def capabilities(self):
189 def capabilities(self):
190 return self._caps
190 return self._caps
191
191
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 """Used to test argument passing over the wire"""
193 """Used to test argument passing over the wire"""
194 return "%s %s %s %s %s" % (one, two, three, four, five)
194 return "%s %s %s %s %s" % (one, two, three, four, five)
195
195
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 **kwargs):
197 **kwargs):
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 common=common, bundlecaps=bundlecaps,
199 common=common, bundlecaps=bundlecaps,
200 **kwargs)
200 **kwargs)
201 cb = util.chunkbuffer(chunks)
201 cb = util.chunkbuffer(chunks)
202
202
203 if exchange.bundle2requested(bundlecaps):
203 if exchange.bundle2requested(bundlecaps):
204 # When requesting a bundle2, getbundle returns a stream to make the
204 # When requesting a bundle2, getbundle returns a stream to make the
205 # wire level function happier. We need to build a proper object
205 # wire level function happier. We need to build a proper object
206 # from it in local peer.
206 # from it in local peer.
207 return bundle2.getunbundler(self.ui, cb)
207 return bundle2.getunbundler(self.ui, cb)
208 else:
208 else:
209 return changegroup.getunbundler('01', cb, None)
209 return changegroup.getunbundler('01', cb, None)
210
210
211 def heads(self):
211 def heads(self):
212 return self._repo.heads()
212 return self._repo.heads()
213
213
214 def known(self, nodes):
214 def known(self, nodes):
215 return self._repo.known(nodes)
215 return self._repo.known(nodes)
216
216
217 def listkeys(self, namespace):
217 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
218 return self._repo.listkeys(namespace)
219
219
220 def lookup(self, key):
220 def lookup(self, key):
221 return self._repo.lookup(key)
221 return self._repo.lookup(key)
222
222
223 def pushkey(self, namespace, key, old, new):
223 def pushkey(self, namespace, key, old, new):
224 return self._repo.pushkey(namespace, key, old, new)
224 return self._repo.pushkey(namespace, key, old, new)
225
225
226 def stream_out(self):
226 def stream_out(self):
227 raise error.Abort(_('cannot perform stream clone against local '
227 raise error.Abort(_('cannot perform stream clone against local '
228 'peer'))
228 'peer'))
229
229
230 def unbundle(self, cg, heads, url):
230 def unbundle(self, cg, heads, url):
231 """apply a bundle on a repo
231 """apply a bundle on a repo
232
232
233 This function handles the repo locking itself."""
233 This function handles the repo locking itself."""
234 try:
234 try:
235 try:
235 try:
236 cg = exchange.readbundle(self.ui, cg, None)
236 cg = exchange.readbundle(self.ui, cg, None)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 if util.safehasattr(ret, 'getchunks'):
238 if util.safehasattr(ret, 'getchunks'):
239 # This is a bundle20 object, turn it into an unbundler.
239 # This is a bundle20 object, turn it into an unbundler.
240 # This little dance should be dropped eventually when the
240 # This little dance should be dropped eventually when the
241 # API is finally improved.
241 # API is finally improved.
242 stream = util.chunkbuffer(ret.getchunks())
242 stream = util.chunkbuffer(ret.getchunks())
243 ret = bundle2.getunbundler(self.ui, stream)
243 ret = bundle2.getunbundler(self.ui, stream)
244 return ret
244 return ret
245 except Exception as exc:
245 except Exception as exc:
246 # If the exception contains output salvaged from a bundle2
246 # If the exception contains output salvaged from a bundle2
247 # reply, we need to make sure it is printed before continuing
247 # reply, we need to make sure it is printed before continuing
248 # to fail. So we build a bundle2 with such output and consume
248 # to fail. So we build a bundle2 with such output and consume
249 # it directly.
249 # it directly.
250 #
250 #
251 # This is not very elegant but allows a "simple" solution for
251 # This is not very elegant but allows a "simple" solution for
252 # issue4594
252 # issue4594
253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 if output:
254 if output:
255 bundler = bundle2.bundle20(self._repo.ui)
255 bundler = bundle2.bundle20(self._repo.ui)
256 for out in output:
256 for out in output:
257 bundler.addpart(out)
257 bundler.addpart(out)
258 stream = util.chunkbuffer(bundler.getchunks())
258 stream = util.chunkbuffer(bundler.getchunks())
259 b = bundle2.getunbundler(self.ui, stream)
259 b = bundle2.getunbundler(self.ui, stream)
260 bundle2.processbundle(self._repo, b)
260 bundle2.processbundle(self._repo, b)
261 raise
261 raise
262 except error.PushRaced as exc:
262 except error.PushRaced as exc:
263 raise error.ResponseError(_('push failed:'), str(exc))
263 raise error.ResponseError(_('push failed:'), str(exc))
264
264
265 # End of _basewirecommands interface.
265 # End of _basewirecommands interface.
266
266
267 # Begin of peer interface.
267 # Begin of peer interface.
268
268
269 def iterbatch(self):
269 def iterbatch(self):
270 return peer.localiterbatcher(self)
270 return peer.localiterbatcher(self)
271
271
272 # End of peer interface.
272 # End of peer interface.
273
273
274 class locallegacypeer(repository.legacypeer, localpeer):
274 class locallegacypeer(repository.legacypeer, localpeer):
275 '''peer extension which implements legacy methods too; used for tests with
275 '''peer extension which implements legacy methods too; used for tests with
276 restricted capabilities'''
276 restricted capabilities'''
277
277
278 def __init__(self, repo):
278 def __init__(self, repo):
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280
280
281 # Begin of baselegacywirecommands interface.
281 # Begin of baselegacywirecommands interface.
282
282
283 def between(self, pairs):
283 def between(self, pairs):
284 return self._repo.between(pairs)
284 return self._repo.between(pairs)
285
285
286 def branches(self, nodes):
286 def branches(self, nodes):
287 return self._repo.branches(nodes)
287 return self._repo.branches(nodes)
288
288
289 def changegroup(self, basenodes, source):
289 def changegroup(self, basenodes, source):
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 missingheads=self._repo.heads())
291 missingheads=self._repo.heads())
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
293
294 def changegroupsubset(self, bases, heads, source):
294 def changegroupsubset(self, bases, heads, source):
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 missingheads=heads)
296 missingheads=heads)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298
298
299 # End of baselegacywirecommands interface.
299 # End of baselegacywirecommands interface.
300
300
301 # Increment the sub-version when the revlog v2 format changes to lock out old
301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 # clients.
302 # clients.
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304
304
305 class localrepository(object):
305 class localrepository(object):
306
306
307 supportedformats = {
307 supportedformats = {
308 'revlogv1',
308 'revlogv1',
309 'generaldelta',
309 'generaldelta',
310 'treemanifest',
310 'treemanifest',
311 'manifestv2',
311 'manifestv2',
312 REVLOGV2_REQUIREMENT,
312 REVLOGV2_REQUIREMENT,
313 }
313 }
314 _basesupported = supportedformats | {
314 _basesupported = supportedformats | {
315 'store',
315 'store',
316 'fncache',
316 'fncache',
317 'shared',
317 'shared',
318 'relshared',
318 'relshared',
319 'dotencode',
319 'dotencode',
320 'exp-sparse',
320 'exp-sparse',
321 }
321 }
322 openerreqs = {
322 openerreqs = {
323 'revlogv1',
323 'revlogv1',
324 'generaldelta',
324 'generaldelta',
325 'treemanifest',
325 'treemanifest',
326 'manifestv2',
326 'manifestv2',
327 }
327 }
328
328
329 # a list of (ui, featureset) functions.
329 # a list of (ui, featureset) functions.
330 # only functions defined in module of enabled extensions are invoked
330 # only functions defined in module of enabled extensions are invoked
331 featuresetupfuncs = set()
331 featuresetupfuncs = set()
332
332
333 # list of prefix for file which can be written without 'wlock'
333 # list of prefix for file which can be written without 'wlock'
334 # Extensions should extend this list when needed
334 # Extensions should extend this list when needed
335 _wlockfreeprefix = {
335 _wlockfreeprefix = {
336 # We migh consider requiring 'wlock' for the next
336 # We migh consider requiring 'wlock' for the next
337 # two, but pretty much all the existing code assume
337 # two, but pretty much all the existing code assume
338 # wlock is not needed so we keep them excluded for
338 # wlock is not needed so we keep them excluded for
339 # now.
339 # now.
340 'hgrc',
340 'hgrc',
341 'requires',
341 'requires',
342 # XXX cache is a complicatged business someone
342 # XXX cache is a complicatged business someone
343 # should investigate this in depth at some point
343 # should investigate this in depth at some point
344 'cache/',
344 'cache/',
345 # XXX shouldn't be dirstate covered by the wlock?
345 # XXX shouldn't be dirstate covered by the wlock?
346 'dirstate',
346 'dirstate',
347 # XXX bisect was still a bit too messy at the time
347 # XXX bisect was still a bit too messy at the time
348 # this changeset was introduced. Someone should fix
348 # this changeset was introduced. Someone should fix
349 # the remainig bit and drop this line
349 # the remainig bit and drop this line
350 'bisect.state',
350 'bisect.state',
351 }
351 }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs; at the time of writing
        # they are only used by basectx.match.
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed roots.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
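
    # A minimal standalone sketch of the ward pattern used above (an
    # illustration with hypothetical names, not part of this module):
    # the wrapper holds only a weak reference to its owner, so the
    # closure cannot keep the repository object alive.
    #
    #     import weakref
    #
    #     def makeward(owner, origfunc, warn):
    #         ownerref = weakref.ref(owner)
    #         def ward(path, mode=None):
    #             ret = origfunc(path, mode=mode)
    #             if ownerref() is not None and mode not in (None, 'r', 'rb'):
    #                 warn('write access: "%s"' % path)
    #             return ret
    #         return ward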

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
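
        # Illustrative hgrc usage (an assumption about a user's setup,
        # not something this changeset ships): configbytes() understands
        # unit suffixes, so revlog indexes larger than the threshold can
        # be mmapped with:
        #
        #     [experimental]
        #     mmapindexthreshold = 1M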

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but
    # that can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
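
    # Illustrative indexing calls (hypothetical values): __getitem__
    # accepts several kinds of changeids, e.g.
    #
    #     ctx = repo['tip']    # symbolic name
    #     ctx = repo[0]        # revision number
    #     ctxs = repo[0:3]     # slice; filtered revisions are skipped
    #     wctx = repo[None]    # working directory context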

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
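
    # Illustrative call (hypothetical revset and value): %-formatting is
    # expanded by revsetlang.formatspec, so arguments are escaped safely:
    #
    #     for rev in repo.revs('ancestors(%d) and not public()', 42):
    #         ...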

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
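
    # Illustrative call (hypothetical alias): a local alias shadows a
    # user-level alias of the same name for this query only:
    #
    #     repo.anyrevs(['mine()'], user=True,
    #                  localalias={'mine': 'draft() and user("alice")'})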

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
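
    # Illustrative hgrc snippet (a user-side assumption, not part of
    # this module): a repository is made non-publishing with:
    #
    #     [phases]
    #     publish = False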

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself,
                # as it requires access to the parents' manifests.
                # Preserve them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
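
    # Illustrative hgrc filter configuration consumed by _loadfilter
    # (a sketch of a user's setup): glob patterns on the left, a shell
    # command or registered data filter on the right:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip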

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
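
    # A minimal usage sketch for the returned transaction (illustrative
    # only; real callers take the appropriate locks first)::
    #
    #   tr = repo.transaction('example')
    #   try:
    #       # ... mutate the store through tr ...
    #       tr.close()    # runs validate/pretxnclose, finalizers, txnclose
    #   finally:
    #       tr.release()  # triggers txnabort hooks if close() never ran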

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
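
    # A minimal ordering sketch (illustrative only): when both locks are
    # needed, take 'wlock' before 'lock', e.g.::
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           # safe to open a transaction and modify the store here
    #           ...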

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
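
    # For illustration: when 'foo' was renamed to 'bar', the new filelog
    # revision of 'bar' carries metadata roughly of the shape
    # {'copy': 'foo', 'copyrev': '<hex filelog node of foo>'} and, as
    # described above, its first filelog parent is set to nullid.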

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
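
    # A minimal usage sketch (illustrative; the message and user are made
    # up)::
    #
    #   node = repo.commit(text='example message',
    #                      user='Example <user@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')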

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2068
2068
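    # A sketch (hypothetical caller; strip-like code follows this pattern)
    # of the intended pairing of the two methods above:
    #
    #     with repo.lock():
    #         repo.destroying()       # flush in-memory state (e.g. phases)
    #         # ... physically remove the stripped revisions ...
    #         repo.destroyed()        # then filter and refresh the caches
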
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

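    # The deprecation warning above points at the replacement API; a short
    # sketch (assumes an existing 'repo'; the pattern is illustrative):
    #
    #     from mercurial import scmutil
    #
    #     ctx = repo['tip']
    #     m = scmutil.match(ctx, ['path:mercurial'])
    #     for f in ctx.walk(m):
    #         repo.ui.write('%s\n' % f)
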
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

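    # Usage sketch (assumes an existing 'repo'): compare the working
    # directory against its first parent, the defaults above:
    #
    #     st = repo.status()
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
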
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

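    # A sketch (hypothetical callback) of registering a post-dirstate-status
    # fixup following the contract documented above:
    #
    #     def poststatus(wctx, status):
    #         # access the dirstate via wctx.repo().dirstate, never a cache
    #         wctx.repo().ui.debug('%d modified files\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(poststatus)
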
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

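    # Usage sketch (assumes an existing 'repo'): newest-to-oldest heads of
    # the 'default' branch, including closed ones:
    #
    #     from mercurial.node import short
    #
    #     for h in repo.branchheads('default', closed=True):
    #         repo.ui.write('%s\n' % short(h))
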
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until a merge or a root is reached,
            # recording each linear segment as (start, end, p1, p2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # collect the nodes found at exponentially growing first-parent
            # distances from top (1, 2, 4, 8, ...) until bottom is reached
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

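    # A self-contained sketch (plain Python, toy parent map) of the
    # exponential sampling between() performs along first parents:
    #
    #     parents = {'e': 'd', 'd': 'c', 'c': 'b', 'b': 'a', 'a': None}
    #
    #     def sample(top, bottom):
    #         n, out, i, f = top, [], 0, 1
    #         while n != bottom and n is not None:
    #             p = parents[n]
    #             if i == f:            # distances 1, 2, 4, 8, ... from top
    #                 out.append(n)
    #                 f *= 2
    #             n = p
    #             i += 1
    #         return out
    #
    #     sample('e', 'a')              # -> ['d', 'c']
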
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

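    # Override sketch (hypothetical extension, using the usual subclassing
    # idiom from a reposetup hook):
    #
    #     from mercurial import error
    #
    #     class vetopushrepo(repo.__class__):
    #         def checkpush(self, pushop):
    #             super(vetopushrepo, self).checkpush(pushop)
    #             if pushop.force:
    #                 raise error.Abort('forced pushes are disabled here')
    #
    #     repo.__class__ = vetopushrepo
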
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; its hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

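    # Registration sketch (hypothetical hook; util.hooks.add takes a source
    # name and a callable, and each hook receives the pushop):
    #
    #     def checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             pushop.repo.ui.warn('pushing over 100 changesets\n')
    #
    #     repo.prepushoutgoinghooks.add('myext', checkoutgoing)
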
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

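    # Usage sketch (assumes an existing 'repo' and a 40-digit hex node
    # 'newhex'): moving a bookmark through the pushkey protocol; the old
    # value must match for the update to succeed:
    #
    #     oldhex = repo.listkeys('bookmarks').get('mybook', '')
    #     ok = repo.pushkey('bookmarks', 'mybook', oldhex, newhex)
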
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

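    # Usage sketch: enumerating the standard pushkey namespaces:
    #
    #     repo.listkeys('namespaces')   # e.g. {'bookmarks': '', 'phases': ''}
    #     repo.listkeys('bookmarks')    # {name: hex node} for each bookmark
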
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

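    # Usage sketch: stash a message so it survives an aborted commit; the
    # returned path is reported relative to the cwd (the file itself is
    # .hg/last-message.txt):
    #
    #     relpath = repo.savecommitmessage('WIP: refactor dirstate\n')
    #     repo.ui.status('message saved to %s\n' % relpath)
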
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

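# For example, undoname('/repo/.hg/store/journal') yields
# '/repo/.hg/store/undo', matching the journal-to-undo renames that
# aftertrans() schedules when a transaction closes.
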
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
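
# A wrapping sketch (hypothetical extension; 'myext.magic' is an invented
# config option) of customizing requirements for new repositories, as the
# docstring above suggests:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'magic'):
#             reqs.add('exp-myext-magic')
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)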