cleanup: say goodbye to manifestv2 format...

Augie Fackler
r36391:0147a473 default
@@ -1,1311 +1,1308 @@
# configitems.py - centralized declaration of configuration options
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import re

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config items known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

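# Illustrative sketch (not part of the original file): how an extension's
# configtable flows through loadconfigtable. The 'myext' name and its
# option are hypothetical; real extensions typically build their table
# with mercurial.registrar.configitem.
#
#   configtable = {
#       'myext': {
#           'threshold': configitem('myext', 'threshold', default=10),
#       },
#   }
#   loadconfigtable(ui, 'myext', configtable)
#
# Re-registering a key the ui already knows triggers the develwarn above.
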
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition; match names using a regular
              expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self._re = None
        if generic:
            self._re = re.compile(self.name)

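# Illustrative sketch (not part of the original file): a plain item
# describes exactly one key, while a generic item compiles its name as a
# regular expression for the wildcard lookup in itemregister.get() below.
#
#   plain = configitem('ui', 'username')
#   wild = configitem('color', r'status\..*', default=None, generic=True)
#   assert wild._re.match('status.modified')
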
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        baseitem = super(itemregister, self).get(key)
        if baseitem is not None and not baseitem.generic:
            return baseitem

        # search for a matching generic item
        generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
        for item in generics:
            # We use 'match' instead of 'search' to make the matching
            # simpler for people unfamiliar with regular expressions.
            # Having the match rooted at the start of the string produces
            # less surprising results for users writing simple regexes for
            # sub-attributes.
            #
            # For example, using "color\..*" with match produces an
            # unsurprising result, while using search could suddenly match
            # apparently unrelated configuration that happens to contain
            # "color." anywhere. This is a tradeoff where we favor
            # requiring ".*" on some patterns to avoid the need to prefix
            # most patterns with "^". The "^" seems more error prone.
            if item._re.match(key):
                return item

        return None

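# Illustrative sketch (not part of the original file) of the lookup rules
# above:
#
#   reg = itemregister()
#   exact = configitem('color', 'mode', default='auto')
#   wild = configitem('color', '.*', default=None, generic=True)
#   reg[exact.name] = exact
#   reg[wild.name] = wild
#   reg.get('mode').default           # 'auto': exact entries win
#   reg.get('status.added').default   # None: falls back to the '.*' generic
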
coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    f = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    f.dynamicdefault = dynamicdefault
    return f

coreconfigitem = getitemregister(coreitems)

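# Illustrative sketch (not part of the original file): every call below
# lands in coreitems, and a second registration of the same name raises.
# 'section' and 'name' here are hypothetical placeholders.
#
#   coreconfigitem('section', 'name', default=None)
#   coreconfigitem('section', 'name', default=True)   # would raise
#   # error.ProgrammingError: duplicated config item registration ...
#   coreitems['section'].get('name').default          # None
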
coreconfigitem('alias', '.*',
    default=None,
    generic=True,
)
coreconfigitem('annotate', 'nodates',
    default=False,
)
coreconfigitem('annotate', 'showfunc',
    default=False,
)
coreconfigitem('annotate', 'unified',
    default=None,
)
coreconfigitem('annotate', 'git',
    default=False,
)
coreconfigitem('annotate', 'ignorews',
    default=False,
)
coreconfigitem('annotate', 'ignorewsamount',
    default=False,
)
coreconfigitem('annotate', 'ignoreblanklines',
    default=False,
)
coreconfigitem('annotate', 'ignorewseol',
    default=False,
)
coreconfigitem('annotate', 'nobinary',
    default=False,
)
coreconfigitem('annotate', 'noprefix',
    default=False,
)
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', '.*',
    default=None,
    generic=True,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'show.aliasprefix',
    default=list,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.check',
    default=None,
    # Deprecated, remove after 4.4 release
    alias=[('experimental', 'updatecheck')]
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('committemplate', '.*',
    default=None,
    generic=True,
)
coreconfigitem('convert', 'cvsps.cache',
    default=True,
)
coreconfigitem('convert', 'cvsps.fuzz',
    default=60,
)
coreconfigitem('convert', 'cvsps.logencoding',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergefrom',
    default=None,
)
coreconfigitem('convert', 'cvsps.mergeto',
    default=None,
)
coreconfigitem('convert', 'git.committeractions',
    default=lambda: ['messagedifferent'],
)
coreconfigitem('convert', 'git.extrakeys',
    default=list,
)
coreconfigitem('convert', 'git.findcopiesharder',
    default=False,
)
coreconfigitem('convert', 'git.remoteprefix',
    default='remote',
)
coreconfigitem('convert', 'git.renamelimit',
    default=400,
)
coreconfigitem('convert', 'git.saverev',
    default=True,
)
coreconfigitem('convert', 'git.similarity',
    default=50,
)
coreconfigitem('convert', 'git.skipsubmodules',
    default=False,
)
coreconfigitem('convert', 'hg.clonebranches',
    default=False,
)
coreconfigitem('convert', 'hg.ignoreerrors',
    default=False,
)
coreconfigitem('convert', 'hg.revs',
    default=None,
)
coreconfigitem('convert', 'hg.saverev',
    default=False,
)
coreconfigitem('convert', 'hg.sourcename',
    default=None,
)
coreconfigitem('convert', 'hg.startrev',
    default=None,
)
coreconfigitem('convert', 'hg.tagsbranch',
    default='default',
)
coreconfigitem('convert', 'hg.usebranchnames',
    default=True,
)
coreconfigitem('convert', 'ignoreancestorcheck',
    default=False,
)
coreconfigitem('convert', 'localtimezone',
    default=False,
)
coreconfigitem('convert', 'p4.encoding',
    default=dynamicdefault,
)
coreconfigitem('convert', 'p4.startrev',
    default=0,
)
coreconfigitem('convert', 'skiptags',
    default=False,
)
coreconfigitem('convert', 'svn.debugsvnlog',
    default=True,
)
coreconfigitem('convert', 'svn.trunk',
    default=None,
)
coreconfigitem('convert', 'svn.tags',
    default=None,
)
coreconfigitem('convert', 'svn.branches',
    default=None,
)
coreconfigitem('convert', 'svn.startrev',
    default=0,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('defaults', '.*',
    default=None,
    generic=True,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'cache-vfs',
    default=None,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'warn-empty-changegroup',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('devel', 'warn-config',
    default=None,
)
coreconfigitem('devel', 'warn-config-default',
    default=None,
)
coreconfigitem('devel', 'user.obsmarker',
    default=None,
)
coreconfigitem('devel', 'warn-config-unknown',
    default=None,
)
coreconfigitem('devel', 'debug.peer-request',
    default=False,
)
coreconfigitem('diff', 'nodates',
    default=False,
)
coreconfigitem('diff', 'showfunc',
    default=False,
)
coreconfigitem('diff', 'unified',
    default=None,
)
coreconfigitem('diff', 'git',
    default=False,
)
coreconfigitem('diff', 'ignorews',
    default=False,
)
coreconfigitem('diff', 'ignorewsamount',
    default=False,
)
coreconfigitem('diff', 'ignoreblanklines',
    default=False,
)
coreconfigitem('diff', 'ignorewseol',
    default=False,
)
coreconfigitem('diff', 'nobinary',
    default=False,
)
coreconfigitem('diff', 'noprefix',
    default=False,
)
coreconfigitem('email', 'bcc',
    default=None,
)
coreconfigitem('email', 'cc',
    default=None,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('email', 'reply-to',
    default=None,
)
coreconfigitem('email', 'to',
    default=None,
)
coreconfigitem('experimental', 'archivemetatemplate',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2.stream',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.movecandidateslimit',
    default=100,
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'directaccess',
    default=False,
)
coreconfigitem('experimental', 'directaccess.revnums',
    default=False,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'evolution',
    default=list,
)
coreconfigitem('experimental', 'evolution.allowdivergence',
    default=False,
    alias=[('experimental', 'allowdivergence')]
)
coreconfigitem('experimental', 'evolution.allowunstable',
    default=None,
)
coreconfigitem('experimental', 'evolution.createmarkers',
    default=None,
)
coreconfigitem('experimental', 'evolution.effect-flags',
    default=True,
    alias=[('experimental', 'effect-flags')]
)
coreconfigitem('experimental', 'evolution.exchange',
    default=None,
)
coreconfigitem('experimental', 'evolution.bundle-obsmarker',
    default=False,
)
coreconfigitem('experimental', 'evolution.report-instabilities',
    default=True,
)
coreconfigitem('experimental', 'evolution.track-operation',
    default=True,
)
coreconfigitem('experimental', 'worddiff',
    default=False,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
coreconfigitem('experimental', 'mmapindexthreshold',
    default=None,
)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'format.compression',
    default='zlib',
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'graphstyle.parent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.missing',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'graphstyle.grandparent',
    default=dynamicdefault,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
-coreconfigitem('experimental', 'manifestv2',
-    default=False,
-)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'remotenames',
    default=False,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'single-head-per-branch',
    default=False,
)
coreconfigitem('experimental', 'sshserver.support-v2',
    default=False,
)
coreconfigitem('experimental', 'spacemovesdown',
    default=False,
)
coreconfigitem('experimental', 'sparse-read',
    default=False,
)
coreconfigitem('experimental', 'sparse-read.density-threshold',
    default=0.25,
)
coreconfigitem('experimental', 'sparse-read.min-gap-size',
    default='256K',
)
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'update.atomic-file',
    default=False,
)
coreconfigitem('experimental', 'sshpeer.advertise-v2',
    default=False,
)
coreconfigitem('extensions', '.*',
    default=None,
    generic=True,
)
coreconfigitem('extdata', '.*',
    default=None,
    generic=True,
)
coreconfigitem('format', 'aggressivemergedeltas',
    default=False,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
)
coreconfigitem('format', 'maxchainlen',
    default=None,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_when_unused',
    default=True,
)
coreconfigitem('fsmonitor', 'warn_update_file_count',
    default=50000,
)
coreconfigitem('hooks', '.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hgweb-paths', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostfingerprints', '.*',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('hostsecurity', 'minimumprotocol',
    default=dynamicdefault,
)
coreconfigitem('hostsecurity', '.*:minimumprotocol$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:ciphers$',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:fingerprints$',
    default=list,
    generic=True,
)
coreconfigitem('hostsecurity', '.*:verifycertsfile$',
    default=None,
    generic=True,
)

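# Illustrative note (not part of the original file): the '$'-anchored
# generics above are what let per-host keys resolve through
# itemregister.get(). A user setting like
#
#   [hostsecurity]
#   example.com:minimumprotocol = tls1.2
#
# is matched by the '.*:minimumprotocol$' item ('example.com' and
# 'tls1.2' are made-up values).
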
coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)
coreconfigitem('logtoprocess', 'commandexception',
    default=None,
)
coreconfigitem('logtoprocess', 'commandfinish',
    default=None,
)
coreconfigitem('logtoprocess', 'command',
    default=None,
)
coreconfigitem('logtoprocess', 'develwarn',
    default=None,
)
coreconfigitem('logtoprocess', 'uiblocked',
    default=None,
)
coreconfigitem('merge', 'checkunknown',
    default='abort',
)
coreconfigitem('merge', 'checkignored',
    default='abort',
)
coreconfigitem('experimental', 'merge.checkpathconflicts',
    default=False,
)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'on-failure',
    default='continue',
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
)
coreconfigitem('merge-tools', '.*',
    default=None,
    generic=True,
)
coreconfigitem('merge-tools', br'.*\.args$',
    default="$local $base $other",
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.binary$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.check$',
    default=list,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.checkchanged$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.executable$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.fixeol$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.gui$',
    default=False,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkers$',
    default='basic',
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
    default=dynamicdefault,  # take from ui.mergemarkertemplate
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.priority$',
    default=0,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.premerge$',
    default=dynamicdefault,
    generic=True,
    priority=-1,
)
coreconfigitem('merge-tools', br'.*\.symlink$',
    default=False,
    generic=True,
    priority=-1,
)
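# Illustrative note (not part of the original file): itemregister.get()
# tries generics in ascending (priority, name) order, so these
# priority=-1 patterns are consulted before the catch-all
# ('merge-tools', '.*') item above. A key like 'kdiff3.args' therefore
# resolves to '$local $base $other' rather than None.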
coreconfigitem('pager', 'attend-.*',
    default=dynamicdefault,
    generic=True,
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('pager', 'pager',
    default=dynamicdefault,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('paths', '.*',
    default=None,
    generic=True,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default='draft',
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'format',
    default=lambda: ['topic', 'bar', 'number', 'estimate'],
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
coreconfigitem('server', 'bookmarks-pushkey-compat',
    default=True,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'bundle1.pull',
    default=None,
)
coreconfigitem('server', 'bundle1gd.pull',
    default=None,
)
coreconfigitem('server', 'bundle1.push',
    default=None,
)
coreconfigitem('server', 'bundle1gd.push',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('share', 'pool',
    default=None,
)
coreconfigitem('share', 'poolnaming',
    default='identity',
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
)
coreconfigitem('subrepos', 'allowed',
    default=dynamicdefault,  # to make backporting simpler
)
coreconfigitem('subrepos', 'hg:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'git:allowed',
    default=dynamicdefault,
)
coreconfigitem('subrepos', 'svn:allowed',
    default=dynamicdefault,
)
coreconfigitem('templates', '.*',
    default=None,
    generic=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'editor',
    default=dynamicdefault,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'http2debuglevel',
    default=None,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'interface.chunkselector',
    default=None,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'ssherrorhint',
    default=None,
)
coreconfigitem('ui', 'statuscopies',
    default=False,
)
coreconfigitem('ui', 'strict',
    default=False,
)
coreconfigitem('ui', 'style',
    default='',
)
coreconfigitem('ui', 'supportcontact',
    default=None,
)
coreconfigitem('ui', 'textwidth',
    default=78,
)
coreconfigitem('ui', 'timeout',
    default='600',
)
coreconfigitem('ui', 'timeout.warn',
    default=0,
)
coreconfigitem('ui', 'traceback',
    default=False,
)
coreconfigitem('ui', 'tweakdefaults',
    default=False,
)
coreconfigitem('ui', 'usehttp2',
    default=False,
)
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
coreconfigitem('ui', 'verbose',
    default=False,
)
coreconfigitem('verify', 'skipflags',
    default=None,
)
coreconfigitem('web', 'allowbz2',
    default=False,
)
coreconfigitem('web', 'allowgz',
    default=False,
)
coreconfigitem('web', 'allow-pull',
    alias=[('web', 'allowpull')],
    default=True,
)
coreconfigitem('web', 'allow-push',
    alias=[('web', 'allow_push')],
    default=list,
)
coreconfigitem('web', 'allowzip',
    default=False,
)
coreconfigitem('web', 'archivesubrepos',
    default=False,
)
coreconfigitem('web', 'cache',
    default=True,
)
coreconfigitem('web', 'contact',
    default=None,
)
coreconfigitem('web', 'deny_push',
    default=list,
)
coreconfigitem('web', 'guessmime',
    default=False,
)
coreconfigitem('web', 'hidden',
    default=False,
)
coreconfigitem('web', 'labels',
    default=list,
)
coreconfigitem('web', 'logoimg',
    default='hglogo.png',
)
coreconfigitem('web', 'logourl',
    default='https://mercurial-scm.org/',
)
coreconfigitem('web', 'accesslog',
    default='-',
)
coreconfigitem('web', 'address',
    default='',
)
coreconfigitem('web', 'allow_archive',
    default=list,
)
coreconfigitem('web', 'allow_read',
    default=list,
)
coreconfigitem('web', 'baseurl',
    default=None,
)
coreconfigitem('web', 'cacerts',
    default=None,
)
coreconfigitem('web', 'certificate',
    default=None,
)
coreconfigitem('web', 'collapse',
    default=False,
)
coreconfigitem('web', 'csp',
    default=None,
)
coreconfigitem('web', 'deny_read',
1217 default=list,
1214 default=list,
1218 )
1215 )
1219 coreconfigitem('web', 'descend',
1216 coreconfigitem('web', 'descend',
1220 default=True,
1217 default=True,
1221 )
1218 )
1222 coreconfigitem('web', 'description',
1219 coreconfigitem('web', 'description',
1223 default="",
1220 default="",
1224 )
1221 )
1225 coreconfigitem('web', 'encoding',
1222 coreconfigitem('web', 'encoding',
1226 default=lambda: encoding.encoding,
1223 default=lambda: encoding.encoding,
1227 )
1224 )
1228 coreconfigitem('web', 'errorlog',
1225 coreconfigitem('web', 'errorlog',
1229 default='-',
1226 default='-',
1230 )
1227 )
1231 coreconfigitem('web', 'ipv6',
1228 coreconfigitem('web', 'ipv6',
1232 default=False,
1229 default=False,
1233 )
1230 )
1234 coreconfigitem('web', 'maxchanges',
1231 coreconfigitem('web', 'maxchanges',
1235 default=10,
1232 default=10,
1236 )
1233 )
1237 coreconfigitem('web', 'maxfiles',
1234 coreconfigitem('web', 'maxfiles',
1238 default=10,
1235 default=10,
1239 )
1236 )
1240 coreconfigitem('web', 'maxshortchanges',
1237 coreconfigitem('web', 'maxshortchanges',
1241 default=60,
1238 default=60,
1242 )
1239 )
1243 coreconfigitem('web', 'motd',
1240 coreconfigitem('web', 'motd',
1244 default='',
1241 default='',
1245 )
1242 )
1246 coreconfigitem('web', 'name',
1243 coreconfigitem('web', 'name',
1247 default=dynamicdefault,
1244 default=dynamicdefault,
1248 )
1245 )
1249 coreconfigitem('web', 'port',
1246 coreconfigitem('web', 'port',
1250 default=8000,
1247 default=8000,
1251 )
1248 )
1252 coreconfigitem('web', 'prefix',
1249 coreconfigitem('web', 'prefix',
1253 default='',
1250 default='',
1254 )
1251 )
1255 coreconfigitem('web', 'push_ssl',
1252 coreconfigitem('web', 'push_ssl',
1256 default=True,
1253 default=True,
1257 )
1254 )
1258 coreconfigitem('web', 'refreshinterval',
1255 coreconfigitem('web', 'refreshinterval',
1259 default=20,
1256 default=20,
1260 )
1257 )
1261 coreconfigitem('web', 'staticurl',
1258 coreconfigitem('web', 'staticurl',
1262 default=None,
1259 default=None,
1263 )
1260 )
1264 coreconfigitem('web', 'stripes',
1261 coreconfigitem('web', 'stripes',
1265 default=1,
1262 default=1,
1266 )
1263 )
1267 coreconfigitem('web', 'style',
1264 coreconfigitem('web', 'style',
1268 default='paper',
1265 default='paper',
1269 )
1266 )
1270 coreconfigitem('web', 'templates',
1267 coreconfigitem('web', 'templates',
1271 default=None,
1268 default=None,
1272 )
1269 )
1273 coreconfigitem('web', 'view',
1270 coreconfigitem('web', 'view',
1274 default='served',
1271 default='served',
1275 )
1272 )
1276 coreconfigitem('worker', 'backgroundclose',
1273 coreconfigitem('worker', 'backgroundclose',
1277 default=dynamicdefault,
1274 default=dynamicdefault,
1278 )
1275 )
1279 # Windows defaults to a limit of 512 open files. A buffer of 128
1276 # Windows defaults to a limit of 512 open files. A buffer of 128
1280 # should give us enough headway.
1277 # should give us enough headway.
1281 coreconfigitem('worker', 'backgroundclosemaxqueue',
1278 coreconfigitem('worker', 'backgroundclosemaxqueue',
1282 default=384,
1279 default=384,
1283 )
1280 )
1284 coreconfigitem('worker', 'backgroundcloseminfilecount',
1281 coreconfigitem('worker', 'backgroundcloseminfilecount',
1285 default=2048,
1282 default=2048,
1286 )
1283 )
1287 coreconfigitem('worker', 'backgroundclosethreadcount',
1284 coreconfigitem('worker', 'backgroundclosethreadcount',
1288 default=4,
1285 default=4,
1289 )
1286 )
1290 coreconfigitem('worker', 'enabled',
1287 coreconfigitem('worker', 'enabled',
1291 default=True,
1288 default=True,
1292 )
1289 )
1293 coreconfigitem('worker', 'numcpus',
1290 coreconfigitem('worker', 'numcpus',
1294 default=None,
1291 default=None,
1295 )
1292 )

# Rebase related configuration moved to core because other extensions are
# doing strange things. For example, shelve imports the extension to reuse
# some bits without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
@@ -1,130 +1,131 @@
-
Repositories contain a file (``.hg/requires``) containing a list of
features/capabilities that are *required* for clients to interface
with the repository. This file has been present in Mercurial since
version 0.9.2 (released December 2006).

One of the first things clients do when opening a repository is read
``.hg/requires`` and verify that all listed requirements are supported,
aborting if not. Requirements are therefore a strong mechanism to
prevent incompatible clients from reading from unknown repository
formats or even corrupting them by writing to them.
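
The check itself is deliberately simple: the file holds one requirement
per line, and a client only needs set arithmetic to decide whether it can
proceed. As a rough illustration (a sketch, not Mercurial's actual
implementation; ``SUPPORTED`` here is a made-up stand-in for whatever a
given client supports)::

    import os

    SUPPORTED = {'revlogv1', 'store', 'fncache', 'dotencode',
                 'generaldelta'}

    def checkrequirements(repopath):
        # .hg/requires lists one requirement per line.
        with open(os.path.join(repopath, '.hg', 'requires')) as fp:
            requirements = set(fp.read().splitlines())
        missing = requirements - SUPPORTED
        if missing:
            # Abort before touching anything: reading or writing an
            # unknown format could corrupt the repository.
            raise RuntimeError('unsupported repository requirements: %s'
                               % ', '.join(sorted(missing)))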

Extensions may add requirements. When they do, clients not running
the extension will be unable to read from such repositories.

The following sections describe the requirements defined by the
Mercurial core distribution.
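
For example, the ``.hg/requires`` file of a repository created by a
recent client will typically contain something like the following
(exact contents vary with version and configuration)::

    dotencode
    fncache
    generaldelta
    revlogv1
    store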

revlogv1
========

When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
in 2006. The ``revlogv1`` requirement has been enabled by default
since the ``requires`` file was introduced in Mercurial 0.9.2.

If this requirement is not present, version 0 revlogs are assumed.

store
=====

The *store* repository layout should be used.

This requirement has been enabled by default since the ``requires`` file
was introduced in Mercurial 0.9.2.

fncache
=======

The *fncache* repository layout should be used.

The *fncache* layout hash-encodes filenames with long paths and
encodes reserved filenames.

This requirement is enabled by default when the *store* requirement is
enabled (which is the default behavior). It was introduced in Mercurial
1.1 (released December 2008).

shared
======

Denotes that the store for a repository is shared from another location
(defined by the ``.hg/sharedpath`` file).

This requirement is set when a repository is created via :hg:`share`.

The requirement was added in Mercurial 1.3 (released July 2009).

relshared
=========

Derivative of ``shared``; the location of the store is relative to the
store of this repository.

This requirement is set when a repository is created via :hg:`share`
using the ``--relative`` option.

The requirement was added in Mercurial 4.2 (released May 2017).

dotencode
=========

The *dotencode* repository layout should be used.

The *dotencode* layout encodes the first period or space in filenames
to prevent issues on OS X and Windows.

This requirement is enabled by default when the *store* requirement
is enabled (which is the default behavior). It was introduced in
Mercurial 1.7 (released November 2010).
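
As an illustration, the encoding boils down to escaping the first
character of a path component when it is a period or a space (a
simplification; the real store encoding handles many more cases)::

    def dotencodecomponent(component):
        # '~2e' and '~20' are the hex escapes for '.' and ' '.
        if component.startswith('.'):
            return '~2e' + component[1:]
        if component.startswith(' '):
            return '~20' + component[1:]
        return component

so a tracked file named ``.hgignore.swp`` is stored under a name
beginning with ``~2ehgignore``.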

parentdelta
===========

Denotes a revlog delta encoding format that was experimental and
replaced by *generaldelta*. It should not be seen in the wild because
it was never enabled by default.

This requirement was added in Mercurial 1.7 and removed in Mercurial
1.9.

generaldelta
============

Revlogs should be created with the *generaldelta* flag enabled. The
generaldelta flag will cause deltas to be encoded against a parent
revision instead of the previous revision in the revlog.
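
The difference is only in how the base of each delta is chosen. Roughly
(a sketch; the real revlog code also weighs delta sizes and chain
lengths before committing to a base)::

    def deltabase(rev, p1rev, generaldelta):
        # Classic revlogs delta against whatever revision happens to
        # precede this one in the file, even if it belongs to an
        # unrelated branch; generaldelta revlogs delta against a
        # parent, which usually yields much smaller deltas for
        # branchy history.
        if generaldelta:
            return p1rev
        return rev - 1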

Support for this requirement was added in Mercurial 1.9 (released
July 2011). The requirement was disabled on new repositories by
default until Mercurial 3.7 (released February 2016).

manifestv2
==========

Denotes that version 2 manifests are being used.

Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The requirement is currently experimental and is disabled
-by default.
+May 2015). The new format failed to meet expectations and support
+for the format and requirement was removed in Mercurial 4.6
+(released May 2018) since the feature never graduated from
+experimental status.

treemanifest
============

Denotes that tree manifests are being used. Tree manifests are
one manifest per directory (as opposed to a single flat manifest).
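
As a sketch of the difference, where a flat manifest is a single list of
every tracked path, tree manifests store one small manifest per
directory, each referring to the manifests of its subdirectories::

    flat manifest           tree manifests
    -------------           --------------
    a/b/x.txt               root: a/ -> <node of manifest for a/>
    a/b/y.txt               a/:   b/ -> <node of manifest for a/b/>, z.txt
    a/z.txt                 a/b/: x.txt, y.txt

A change under ``a/b/`` then only touches the manifests along that
directory's path instead of the single repository-wide list.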

Support for this requirement was added in Mercurial 3.4 (released
May 2015). The requirement is currently experimental and is
disabled by default.

exp-sparse
==========

The working directory is sparse (only contains a subset of files).

Support for this requirement was added in Mercurial 4.3 (released
August 2017). This requirement and feature are experimental and may
disappear in a future Mercurial release. The requirement will only
be present on repositories that have opted in to a sparse working
directory.
@@ -1,2274 +1,2275 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecached property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

+    # obsolete experimental requirements:
+    #  - manifestv2: An experimental new manifest format that allowed
+    #    for stem compression of long paths. Experiment ended up not
+    #    being successful (repository sizes went up due to worse delta
+    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
-        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
-        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
565
568
566 def close(self):
569 def close(self):
567 self._writecaches()
570 self._writecaches()
568
571
569 def _loadextensions(self):
572 def _loadextensions(self):
570 extensions.loadall(self.ui)
573 extensions.loadall(self.ui)
571
574
572 def _writecaches(self):
575 def _writecaches(self):
573 if self._revbranchcache:
576 if self._revbranchcache:
574 self._revbranchcache.write()
577 self._revbranchcache.write()
575
578
576 def _restrictcapabilities(self, caps):
579 def _restrictcapabilities(self, caps):
577 if self.ui.configbool('experimental', 'bundle2-advertise'):
580 if self.ui.configbool('experimental', 'bundle2-advertise'):
578 caps = set(caps)
581 caps = set(caps)
579 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
582 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
580 role='client'))
583 role='client'))
581 caps.add('bundle2=' + urlreq.quote(capsblob))
584 caps.add('bundle2=' + urlreq.quote(capsblob))
582 return caps
585 return caps
583
586
584 def _applyopenerreqs(self):
587 def _applyopenerreqs(self):
585 self.svfs.options = dict((r, 1) for r in self.requirements
588 self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

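    # A sketch of how the options above could be driven from a user's hgrc;
    # the section and key names come from the reads above, but the values
    # shown here are purely illustrative, not recommendations:
    #
    #   [format]
    #   chunkcachesize = 65536
    #   maxchainlen = 1000
    #
    #   [experimental]
    #   sparse-read = yes
    #   sparse-read.density-threshold = 0.25
    #   sparse-read.min-gap-size = 256K
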
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

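    # A hedged usage sketch: %d and %s below are formatspec escapes, so a
    # caller might write (the revision numbers and keyword are hypothetical):
    #
    #   for rev in repo.revs('%d::%d and keyword(%s)', 0, 5, 'fix'):
    #       ...    # rev is an integer revision number
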
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

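    # As a sketch, the generator form pairs iteration with contexts rather
    # than integer revisions:
    #
    #   for ctx in repo.set('heads(all())'):
    #       ...    # each ctx is a context.changectx
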
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

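    # A sketch of the localalias override described above; 'mybase' is a
    # hypothetical alias name that exists only for this call:
    #
    #   revs = repo.anyrevs(['mybase::tip'], user=True,
    #                       localalias={'mybase': 'branch(default)'})
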
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

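    # For instance (a sketch; the tag names below are hypothetical):
    #
    #   repo.tagtype('v1.0')    # -> 'global' if defined in .hgtags
    #   repo.tagtype('wip')     # -> 'local' if defined in .hg/localtags
    #   repo.tagtype('nope')    # -> None
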
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

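    # A hedged sketch of the two calling conventions ('gone' is a
    # hypothetical branch name that does not exist):
    #
    #   repo.branchtip('default')                    # node, or raises
    #   repo.branchtip('gone', ignoremissing=True)   # falls through -> None
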
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

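    # known() answers membership per input node, preserving order; a sketch
    # (the node values below are hypothetical binary nodeids):
    #
    #   repo.known([presentnode, missingnode])   # -> [True, False]
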
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

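    # The filter patterns above come from the [encode]/[decode] config
    # sections; a hedged hgrc sketch (pattern and commands are illustrative):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = tempfile: dos2unix -n INFILE OUTFILE
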
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

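    # Flag semantics, as a sketch: '' writes a regular file, 'x' marks it
    # executable, and 'l' writes ``data`` as a symlink target instead (the
    # paths below are hypothetical):
    #
    #   repo.wwrite('bin/run.sh', scriptdata, 'x')
    #   repo.wwrite('current', 'releases/1.0', 'l')
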
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with the case where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

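    # A hedged usage sketch: callers must hold the store lock, and nested
    # calls join the already-running transaction ('my-operation' is a
    # hypothetical description string):
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...            # mutate the repository
    #           tr.close()
    #       finally:
    #           tr.release()   # rolls back unless close() succeeded
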
1343 def _journalfiles(self):
1346 def _journalfiles(self):
1344 return ((self.svfs, 'journal'),
1347 return ((self.svfs, 'journal'),
1345 (self.vfs, 'journal.dirstate'),
1348 (self.vfs, 'journal.dirstate'),
1346 (self.vfs, 'journal.branch'),
1349 (self.vfs, 'journal.branch'),
1347 (self.vfs, 'journal.desc'),
1350 (self.vfs, 'journal.desc'),
1348 (self.vfs, 'journal.bookmarks'),
1351 (self.vfs, 'journal.bookmarks'),
1349 (self.svfs, 'journal.phaseroots'))
1352 (self.svfs, 'journal.phaseroots'))
1350
1353
1351 def undofiles(self):
1354 def undofiles(self):
1352 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1355 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1353
1356
1354 @unfilteredmethod
1357 @unfilteredmethod
1355 def _writejournal(self, desc):
1358 def _writejournal(self, desc):
1356 self.dirstate.savebackup(None, 'journal.dirstate')
1359 self.dirstate.savebackup(None, 'journal.dirstate')
1357 self.vfs.write("journal.branch",
1360 self.vfs.write("journal.branch",
1358 encoding.fromlocal(self.dirstate.branch()))
1361 encoding.fromlocal(self.dirstate.branch()))
1359 self.vfs.write("journal.desc",
1362 self.vfs.write("journal.desc",
1360 "%d\n%s\n" % (len(self), desc))
1363 "%d\n%s\n" % (len(self), desc))
1361 self.vfs.write("journal.bookmarks",
1364 self.vfs.write("journal.bookmarks",
1362 self.vfs.tryread("bookmarks"))
1365 self.vfs.tryread("bookmarks"))
1363 self.svfs.write("journal.phaseroots",
1366 self.svfs.write("journal.phaseroots",
1364 self.svfs.tryread("phaseroots"))
1367 self.svfs.tryread("phaseroots"))
1365
1368
1366 def recover(self):
1369 def recover(self):
1367 with self.lock():
1370 with self.lock():
1368 if self.svfs.exists("journal"):
1371 if self.svfs.exists("journal"):
1369 self.ui.status(_("rolling back interrupted transaction\n"))
1372 self.ui.status(_("rolling back interrupted transaction\n"))
1370 vfsmap = {'': self.svfs,
1373 vfsmap = {'': self.svfs,
1371 'plain': self.vfs,}
1374 'plain': self.vfs,}
1372 transaction.rollback(self.svfs, vfsmap, "journal",
1375 transaction.rollback(self.svfs, vfsmap, "journal",
1373 self.ui.warn,
1376 self.ui.warn,
1374 checkambigfiles=_cachedfiles)
1377 checkambigfiles=_cachedfiles)
1375 self.invalidate()
1378 self.invalidate()
1376 return True
1379 return True
1377 else:
1380 else:
1378 self.ui.warn(_("no interrupted transaction available\n"))
1381 self.ui.warn(_("no interrupted transaction available\n"))
1379 return False
1382 return False
1380
1383
1381 def rollback(self, dryrun=False, force=False):
1384 def rollback(self, dryrun=False, force=False):
1382 wlock = lock = dsguard = None
1385 wlock = lock = dsguard = None
1383 try:
1386 try:
1384 wlock = self.wlock()
1387 wlock = self.wlock()
1385 lock = self.lock()
1388 lock = self.lock()
1386 if self.svfs.exists("undo"):
1389 if self.svfs.exists("undo"):
1387 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1390 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1388
1391
1389 return self._rollback(dryrun, force, dsguard)
1392 return self._rollback(dryrun, force, dsguard)
1390 else:
1393 else:
1391 self.ui.warn(_("no rollback information available\n"))
1394 self.ui.warn(_("no rollback information available\n"))
1392 return 1
1395 return 1
1393 finally:
1396 finally:
1394 release(dsguard, lock, wlock)
1397 release(dsguard, lock, wlock)
1395
1398
1396 @unfilteredmethod # Until we get smarter cache management
1399 @unfilteredmethod # Until we get smarter cache management
1397 def _rollback(self, dryrun, force, dsguard):
1400 def _rollback(self, dryrun, force, dsguard):
1398 ui = self.ui
1401 ui = self.ui
1399 try:
1402 try:
1400 args = self.vfs.read('undo.desc').splitlines()
1403 args = self.vfs.read('undo.desc').splitlines()
1401 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1404 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1402 if len(args) >= 3:
1405 if len(args) >= 3:
1403 detail = args[2]
1406 detail = args[2]
1404 oldtip = oldlen - 1
1407 oldtip = oldlen - 1
1405
1408
1406 if detail and ui.verbose:
1409 if detail and ui.verbose:
1407 msg = (_('repository tip rolled back to revision %d'
1410 msg = (_('repository tip rolled back to revision %d'
1408 ' (undo %s: %s)\n')
1411 ' (undo %s: %s)\n')
1409 % (oldtip, desc, detail))
1412 % (oldtip, desc, detail))
1410 else:
1413 else:
1411 msg = (_('repository tip rolled back to revision %d'
1414 msg = (_('repository tip rolled back to revision %d'
1412 ' (undo %s)\n')
1415 ' (undo %s)\n')
1413 % (oldtip, desc))
1416 % (oldtip, desc))
1414 except IOError:
1417 except IOError:
1415 msg = _('rolling back unknown transaction\n')
1418 msg = _('rolling back unknown transaction\n')
1416 desc = None
1419 desc = None
1417
1420
1418 if not force and self['.'] != self['tip'] and desc == 'commit':
1421 if not force and self['.'] != self['tip'] and desc == 'commit':
1419 raise error.Abort(
1422 raise error.Abort(
1420 _('rollback of last commit while not checked out '
1423 _('rollback of last commit while not checked out '
1421 'may lose data'), hint=_('use -f to force'))
1424 'may lose data'), hint=_('use -f to force'))
1422
1425
1423 ui.status(msg)
1426 ui.status(msg)
1424 if dryrun:
1427 if dryrun:
1425 return 0
1428 return 0
1426
1429
1427 parents = self.dirstate.parents()
1430 parents = self.dirstate.parents()
1428 self.destroying()
1431 self.destroying()
1429 vfsmap = {'plain': self.vfs, '': self.svfs}
1432 vfsmap = {'plain': self.vfs, '': self.svfs}
1430 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1433 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1431 checkambigfiles=_cachedfiles)
1434 checkambigfiles=_cachedfiles)
1432 if self.vfs.exists('undo.bookmarks'):
1435 if self.vfs.exists('undo.bookmarks'):
1433 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1436 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1434 if self.svfs.exists('undo.phaseroots'):
1437 if self.svfs.exists('undo.phaseroots'):
1435 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1438 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1436 self.invalidate()
1439 self.invalidate()
1437
1440
1438 parentgone = (parents[0] not in self.changelog.nodemap or
1441 parentgone = (parents[0] not in self.changelog.nodemap or
1439 parents[1] not in self.changelog.nodemap)
1442 parents[1] not in self.changelog.nodemap)
1440 if parentgone:
1443 if parentgone:
1441 # prevent dirstateguard from overwriting the already restored one
1444 # prevent dirstateguard from overwriting the already restored one
1442 dsguard.close()
1445 dsguard.close()
1443
1446
1444 self.dirstate.restorebackup(None, 'undo.dirstate')
1447 self.dirstate.restorebackup(None, 'undo.dirstate')
1445 try:
1448 try:
1446 branch = self.vfs.read('undo.branch')
1449 branch = self.vfs.read('undo.branch')
1447 self.dirstate.setbranch(encoding.tolocal(branch))
1450 self.dirstate.setbranch(encoding.tolocal(branch))
1448 except IOError:
1451 except IOError:
1449 ui.warn(_('named branch could not be reset: '
1452 ui.warn(_('named branch could not be reset: '
1450 'current branch is still \'%s\'\n')
1453 'current branch is still \'%s\'\n')
1451 % self.dirstate.branch())
1454 % self.dirstate.branch())
1452
1455
1453 parents = tuple([p.rev() for p in self[None].parents()])
1456 parents = tuple([p.rev() for p in self[None].parents()])
1454 if len(parents) > 1:
1457 if len(parents) > 1:
1455 ui.status(_('working directory now based on '
1458 ui.status(_('working directory now based on '
1456 'revisions %d and %d\n') % parents)
1459 'revisions %d and %d\n') % parents)
1457 else:
1460 else:
1458 ui.status(_('working directory now based on '
1461 ui.status(_('working directory now based on '
1459 'revision %d\n') % parents)
1462 'revision %d\n') % parents)
1460 mergemod.mergestate.clean(self, self['.'].node())
1463 mergemod.mergestate.clean(self, self['.'].node())
1461
1464
1462 # TODO: if we know which new heads may result from this rollback, pass
1465 # TODO: if we know which new heads may result from this rollback, pass
1463 # them to destroy(), which will prevent the branchhead cache from being
1466 # them to destroy(), which will prevent the branchhead cache from being
1464 # invalidated.
1467 # invalidated.
1465 self.destroyed()
1468 self.destroyed()
1466 return 0
1469 return 0
1467
1470
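As an aside on the mechanics above: _rollback() restores state by renaming the saved undo.* copies back over the live files. A minimal, self-contained sketch of that rename-back step, using plain os calls in a temporary directory rather than Mercurial's vfs layer (restore_undo is a hypothetical helper, not part of the API):

import os, shutil, tempfile

def restore_undo(root):
    # each undo.* file, if present, replaces its live counterpart
    for undo, live in [('undo.bookmarks', 'bookmarks'),
                       ('undo.phaseroots', 'phaseroots')]:
        src = os.path.join(root, undo)
        if os.path.exists(src):
            os.rename(src, os.path.join(root, live))

root = tempfile.mkdtemp()
open(os.path.join(root, 'undo.bookmarks'), 'w').close()
restore_undo(root)
assert os.path.exists(os.path.join(root, 'bookmarks'))
shutil.rmtree(root)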
1468 def _buildcacheupdater(self, newtransaction):
1471 def _buildcacheupdater(self, newtransaction):
1469 """called during transaction to build the callback updating cache
1472 """called during transaction to build the callback updating cache
1470
1473
1471 Lives on the repository to help extensions that might want to augment
1474 Lives on the repository to help extensions that might want to augment
1472 this logic. For this purpose, the created transaction is passed to the
1475 this logic. For this purpose, the created transaction is passed to the
1473 method.
1476 method.
1474 """
1477 """
1475 # we must avoid cyclic reference between repo and transaction.
1478 # we must avoid cyclic reference between repo and transaction.
1476 reporef = weakref.ref(self)
1479 reporef = weakref.ref(self)
1477 def updater(tr):
1480 def updater(tr):
1478 repo = reporef()
1481 repo = reporef()
1479 repo.updatecaches(tr)
1482 repo.updatecaches(tr)
1480 return updater
1483 return updater
1481
1484
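_buildcacheupdater() holds the repository only through a weak reference so the transaction's callback cannot create a repo-to-transaction reference cycle. A toy version of the same pattern (Repo and make_updater are illustrative names, not Mercurial's API):

import weakref

class Repo(object):
    def make_updater(self):
        reporef = weakref.ref(self)      # weak: no cycle via the callback
        def updater(tr):
            repo = reporef()
            if repo is not None:         # the repo may already be gone
                repo.updatecaches(tr)
        return updater

    def updatecaches(self, tr):
        print('warming caches for %s' % tr)

repo = Repo()
repo.make_updater()('tx-1')              # prints: warming caches for tx-1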
1482 @unfilteredmethod
1485 @unfilteredmethod
1483 def updatecaches(self, tr=None):
1486 def updatecaches(self, tr=None):
1484 """warm appropriate caches
1487 """warm appropriate caches
1485
1488
1486 If this function is called after a transaction closed, the transaction
1489 If this function is called after a transaction closed, the transaction
1487 will be available in the 'tr' argument. This can be used to selectively
1490 will be available in the 'tr' argument. This can be used to selectively
1488 update caches relevant to the changes in that transaction.
1491 update caches relevant to the changes in that transaction.
1489 """
1492 """
1490 if tr is not None and tr.hookargs.get('source') == 'strip':
1493 if tr is not None and tr.hookargs.get('source') == 'strip':
1491 # During strip, many caches are invalid but
1494 # During strip, many caches are invalid but
1492 # a later call to `destroyed` will refresh them.
1495 # a later call to `destroyed` will refresh them.
1493 return
1496 return
1494
1497
1495 if tr is None or tr.changes['revs']:
1498 if tr is None or tr.changes['revs']:
1496 # updating the unfiltered branchmap should refresh all the others,
1499 # updating the unfiltered branchmap should refresh all the others,
1497 self.ui.debug('updating the branch cache\n')
1500 self.ui.debug('updating the branch cache\n')
1498 branchmap.updatecache(self.filtered('served'))
1501 branchmap.updatecache(self.filtered('served'))
1499
1502
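The gating in updatecaches() is easy to restate standalone: skip cache warming during a strip (destroyed() refreshes caches later) and warm only when the transaction actually added revisions. A small sketch with stub objects (FakeTr is hypothetical):

class FakeTr(object):
    def __init__(self, source, revs):
        self.hookargs = {'source': source}
        self.changes = {'revs': revs}

def updatecaches(tr=None, warm=lambda: None):
    if tr is not None and tr.hookargs.get('source') == 'strip':
        return 'skipped'                 # strip refreshes caches itself
    if tr is None or tr.changes['revs']:
        warm()
        return 'warmed'
    return 'nothing to do'

assert updatecaches(FakeTr('strip', {0})) == 'skipped'
assert updatecaches(FakeTr('push', {0, 1})) == 'warmed'
assert updatecaches(FakeTr('push', set())) == 'nothing to do'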
1500 def invalidatecaches(self):
1503 def invalidatecaches(self):
1501
1504
1502 if '_tagscache' in vars(self):
1505 if '_tagscache' in vars(self):
1503 # can't use delattr on proxy
1506 # can't use delattr on proxy
1504 del self.__dict__['_tagscache']
1507 del self.__dict__['_tagscache']
1505
1508
1506 self.unfiltered()._branchcaches.clear()
1509 self.unfiltered()._branchcaches.clear()
1507 self.invalidatevolatilesets()
1510 self.invalidatevolatilesets()
1508 self._sparsesignaturecache.clear()
1511 self._sparsesignaturecache.clear()
1509
1512
1510 def invalidatevolatilesets(self):
1513 def invalidatevolatilesets(self):
1511 self.filteredrevcache.clear()
1514 self.filteredrevcache.clear()
1512 obsolete.clearobscaches(self)
1515 obsolete.clearobscaches(self)
1513
1516
1514 def invalidatedirstate(self):
1517 def invalidatedirstate(self):
1515 '''Invalidates the dirstate, causing the next call to dirstate
1518 '''Invalidates the dirstate, causing the next call to dirstate
1516 to check if it was modified since the last time it was read,
1519 to check if it was modified since the last time it was read,
1517 rereading it if it has.
1520 rereading it if it has.
1518
1521
1519 This is different from dirstate.invalidate() in that it doesn't always
1522 This is different from dirstate.invalidate() in that it doesn't always
1520 reread the dirstate. Use dirstate.invalidate() if you want to
1523 reread the dirstate. Use dirstate.invalidate() if you want to
1521 explicitly read the dirstate again (i.e. restoring it to a previous
1524 explicitly read the dirstate again (i.e. restoring it to a previous
1522 known good state).'''
1525 known good state).'''
1523 if hasunfilteredcache(self, 'dirstate'):
1526 if hasunfilteredcache(self, 'dirstate'):
1524 for k in self.dirstate._filecache:
1527 for k in self.dirstate._filecache:
1525 try:
1528 try:
1526 delattr(self.dirstate, k)
1529 delattr(self.dirstate, k)
1527 except AttributeError:
1530 except AttributeError:
1528 pass
1531 pass
1529 delattr(self.unfiltered(), 'dirstate')
1532 delattr(self.unfiltered(), 'dirstate')
1530
1533
1531 def invalidate(self, clearfilecache=False):
1534 def invalidate(self, clearfilecache=False):
1532 '''Invalidates both store and non-store parts other than dirstate
1535 '''Invalidates both store and non-store parts other than dirstate
1533
1536
1534 If a transaction is running, invalidation of the store is omitted,
1537 If a transaction is running, invalidation of the store is omitted,
1535 because discarding in-memory changes might cause inconsistency
1538 because discarding in-memory changes might cause inconsistency
1536 (e.g. an incomplete fncache causes unintentional failure, but a
1539 (e.g. an incomplete fncache causes unintentional failure, but a
1537 redundant one doesn't).
1540 redundant one doesn't).
1538 '''
1541 '''
1539 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1542 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1540 for k in list(self._filecache.keys()):
1543 for k in list(self._filecache.keys()):
1541 # dirstate is invalidated separately in invalidatedirstate()
1544 # dirstate is invalidated separately in invalidatedirstate()
1542 if k == 'dirstate':
1545 if k == 'dirstate':
1543 continue
1546 continue
1544 if (k == 'changelog' and
1547 if (k == 'changelog' and
1545 self.currenttransaction() and
1548 self.currenttransaction() and
1546 self.changelog._delayed):
1549 self.changelog._delayed):
1547 # The changelog object may store unwritten revisions. We don't
1550 # The changelog object may store unwritten revisions. We don't
1548 # want to lose them.
1551 # want to lose them.
1549 # TODO: Solve the problem instead of working around it.
1552 # TODO: Solve the problem instead of working around it.
1550 continue
1553 continue
1551
1554
1552 if clearfilecache:
1555 if clearfilecache:
1553 del self._filecache[k]
1556 del self._filecache[k]
1554 try:
1557 try:
1555 delattr(unfiltered, k)
1558 delattr(unfiltered, k)
1556 except AttributeError:
1559 except AttributeError:
1557 pass
1560 pass
1558 self.invalidatecaches()
1561 self.invalidatecaches()
1559 if not self.currenttransaction():
1562 if not self.currenttransaction():
1560 # TODO: Changing contents of store outside transaction
1563 # TODO: Changing contents of store outside transaction
1561 # causes inconsistency. We should make in-memory store
1564 # causes inconsistency. We should make in-memory store
1562 # changes detectable, and abort if changed.
1565 # changes detectable, and abort if changed.
1563 self.store.invalidatecaches()
1566 self.store.invalidatecaches()
1564
1567
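invalidate() drops cached attributes with delattr() so the next attribute access re-reads from disk; an AttributeError only means the value was never computed. The same idiom in isolation (this Repo is a stand-in, not the real class):

class Repo(object):
    def __init__(self):
        self._filecache = {'changelog': None, 'dirstate': None}

    def invalidate(self):
        for k in list(self._filecache):
            if k == 'dirstate':          # handled by invalidatedirstate()
                continue
            try:
                delattr(self, k)
            except AttributeError:       # never computed: nothing to drop
                pass

r = Repo()
r.changelog = 'cached value'
r.invalidate()
assert not hasattr(r, 'changelog')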
1565 def invalidateall(self):
1568 def invalidateall(self):
1566 '''Fully invalidates both store and non-store parts, causing the
1569 '''Fully invalidates both store and non-store parts, causing the
1567 subsequent operation to reread any outside changes.'''
1570 subsequent operation to reread any outside changes.'''
1568 # extensions should hook this to invalidate their caches
1571 # extensions should hook this to invalidate their caches
1569 self.invalidate()
1572 self.invalidate()
1570 self.invalidatedirstate()
1573 self.invalidatedirstate()
1571
1574
1572 @unfilteredmethod
1575 @unfilteredmethod
1573 def _refreshfilecachestats(self, tr):
1576 def _refreshfilecachestats(self, tr):
1574 """Reload stats of cached files so that they are flagged as valid"""
1577 """Reload stats of cached files so that they are flagged as valid"""
1575 for k, ce in self._filecache.items():
1578 for k, ce in self._filecache.items():
1576 k = pycompat.sysstr(k)
1579 k = pycompat.sysstr(k)
1577 if k == r'dirstate' or k not in self.__dict__:
1580 if k == r'dirstate' or k not in self.__dict__:
1578 continue
1581 continue
1579 ce.refresh()
1582 ce.refresh()
1580
1583
1581 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1584 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1582 inheritchecker=None, parentenvvar=None):
1585 inheritchecker=None, parentenvvar=None):
1583 parentlock = None
1586 parentlock = None
1584 # the contents of parentenvvar are used by the underlying lock to
1587 # the contents of parentenvvar are used by the underlying lock to
1585 # determine whether it can be inherited
1588 # determine whether it can be inherited
1586 if parentenvvar is not None:
1589 if parentenvvar is not None:
1587 parentlock = encoding.environ.get(parentenvvar)
1590 parentlock = encoding.environ.get(parentenvvar)
1588
1591
1589 timeout = 0
1592 timeout = 0
1590 warntimeout = 0
1593 warntimeout = 0
1591 if wait:
1594 if wait:
1592 timeout = self.ui.configint("ui", "timeout")
1595 timeout = self.ui.configint("ui", "timeout")
1593 warntimeout = self.ui.configint("ui", "timeout.warn")
1596 warntimeout = self.ui.configint("ui", "timeout.warn")
1594
1597
1595 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1598 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1596 releasefn=releasefn,
1599 releasefn=releasefn,
1597 acquirefn=acquirefn, desc=desc,
1600 acquirefn=acquirefn, desc=desc,
1598 inheritchecker=inheritchecker,
1601 inheritchecker=inheritchecker,
1599 parentlock=parentlock)
1602 parentlock=parentlock)
1600 return l
1603 return l
1601
1604
1602 def _afterlock(self, callback):
1605 def _afterlock(self, callback):
1603 """add a callback to be run when the repository is fully unlocked
1606 """add a callback to be run when the repository is fully unlocked
1604
1607
1605 The callback will be executed when the outermost lock is released
1608 The callback will be executed when the outermost lock is released
1606 (with wlock being higher level than 'lock')."""
1609 (with wlock being higher level than 'lock')."""
1607 for ref in (self._wlockref, self._lockref):
1610 for ref in (self._wlockref, self._lockref):
1608 l = ref and ref()
1611 l = ref and ref()
1609 if l and l.held:
1612 if l and l.held:
1610 l.postrelease.append(callback)
1613 l.postrelease.append(callback)
1611 break
1614 break
1612 else: # no lock has been found.
1615 else: # no lock has been found.
1613 callback()
1616 callback()
1614
1617
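_afterlock() either queues the callback on the outermost held lock's postrelease list or, when nothing is locked, runs it immediately. A self-contained model of that behaviour (FakeLock is illustrative):

class FakeLock(object):
    def __init__(self):
        self.held = 1
        self.postrelease = []
    def release(self):
        self.held -= 1
        if not self.held:
            for cb in self.postrelease:
                cb()

def afterlock(lock, callback):
    if lock is not None and lock.held:
        lock.postrelease.append(callback)   # defer until fully unlocked
    else:
        callback()                          # no lock held: run right away

events = []
l = FakeLock()
afterlock(l, lambda: events.append('hook'))
assert events == []                         # still deferred
l.release()
assert events == ['hook']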
1615 def lock(self, wait=True):
1618 def lock(self, wait=True):
1616 '''Lock the repository store (.hg/store) and return a weak reference
1619 '''Lock the repository store (.hg/store) and return a weak reference
1617 to the lock. Use this before modifying the store (e.g. committing or
1620 to the lock. Use this before modifying the store (e.g. committing or
1618 stripping). If you are opening a transaction, get a lock as well.
1621 stripping). If you are opening a transaction, get a lock as well.
1619
1622
1620 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1623 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1621 'wlock' first to avoid a dead-lock hazard.'''
1624 'wlock' first to avoid a dead-lock hazard.'''
1622 l = self._currentlock(self._lockref)
1625 l = self._currentlock(self._lockref)
1623 if l is not None:
1626 if l is not None:
1624 l.lock()
1627 l.lock()
1625 return l
1628 return l
1626
1629
1627 l = self._lock(self.svfs, "lock", wait, None,
1630 l = self._lock(self.svfs, "lock", wait, None,
1628 self.invalidate, _('repository %s') % self.origroot)
1631 self.invalidate, _('repository %s') % self.origroot)
1629 self._lockref = weakref.ref(l)
1632 self._lockref = weakref.ref(l)
1630 return l
1633 return l
1631
1634
1632 def _wlockchecktransaction(self):
1635 def _wlockchecktransaction(self):
1633 if self.currenttransaction() is not None:
1636 if self.currenttransaction() is not None:
1634 raise error.LockInheritanceContractViolation(
1637 raise error.LockInheritanceContractViolation(
1635 'wlock cannot be inherited in the middle of a transaction')
1638 'wlock cannot be inherited in the middle of a transaction')
1636
1639
1637 def wlock(self, wait=True):
1640 def wlock(self, wait=True):
1638 '''Lock the non-store parts of the repository (everything under
1641 '''Lock the non-store parts of the repository (everything under
1639 .hg except .hg/store) and return a weak reference to the lock.
1642 .hg except .hg/store) and return a weak reference to the lock.
1640
1643
1641 Use this before modifying files in .hg.
1644 Use this before modifying files in .hg.
1642
1645
1643 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1646 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1644 'wlock' first to avoid a dead-lock hazard.'''
1647 'wlock' first to avoid a dead-lock hazard.'''
1645 l = self._wlockref and self._wlockref()
1648 l = self._wlockref and self._wlockref()
1646 if l is not None and l.held:
1649 if l is not None and l.held:
1647 l.lock()
1650 l.lock()
1648 return l
1651 return l
1649
1652
1650 # We do not need to check for non-waiting lock acquisition. Such
1653 # We do not need to check for non-waiting lock acquisition. Such
1651 # acquisitions would not cause a dead-lock, as they would just fail.
1654 # acquisitions would not cause a dead-lock, as they would just fail.
1652 if wait and (self.ui.configbool('devel', 'all-warnings')
1655 if wait and (self.ui.configbool('devel', 'all-warnings')
1653 or self.ui.configbool('devel', 'check-locks')):
1656 or self.ui.configbool('devel', 'check-locks')):
1654 if self._currentlock(self._lockref) is not None:
1657 if self._currentlock(self._lockref) is not None:
1655 self.ui.develwarn('"wlock" acquired after "lock"')
1658 self.ui.develwarn('"wlock" acquired after "lock"')
1656
1659
1657 def unlock():
1660 def unlock():
1658 if self.dirstate.pendingparentchange():
1661 if self.dirstate.pendingparentchange():
1659 self.dirstate.invalidate()
1662 self.dirstate.invalidate()
1660 else:
1663 else:
1661 self.dirstate.write(None)
1664 self.dirstate.write(None)
1662
1665
1663 self._filecache['dirstate'].refresh()
1666 self._filecache['dirstate'].refresh()
1664
1667
1665 l = self._lock(self.vfs, "wlock", wait, unlock,
1668 l = self._lock(self.vfs, "wlock", wait, unlock,
1666 self.invalidatedirstate, _('working directory of %s') %
1669 self.invalidatedirstate, _('working directory of %s') %
1667 self.origroot,
1670 self.origroot,
1668 inheritchecker=self._wlockchecktransaction,
1671 inheritchecker=self._wlockchecktransaction,
1669 parentenvvar='HG_WLOCK_LOCKER')
1672 parentenvvar='HG_WLOCK_LOCKER')
1670 self._wlockref = weakref.ref(l)
1673 self._wlockref = weakref.ref(l)
1671 return l
1674 return l
1672
1675
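Both docstrings insist on taking 'wlock' before 'lock'; two processes that disagree on the order can deadlock against each other. One way to make the order hard to get wrong is a small context manager; repo_locked and the fake classes below are a sketch under that assumption, not part of Mercurial:

from contextlib import contextmanager

class FakeLock(object):
    def __init__(self, name, log):
        self.name, self.log = name, log
        log.append('acquire ' + name)
    def release(self):
        self.log.append('release ' + self.name)

class FakeRepo(object):
    def __init__(self):
        self.log = []
    def wlock(self):
        return FakeLock('wlock', self.log)
    def lock(self):
        return FakeLock('lock', self.log)

@contextmanager
def repo_locked(repo):
    wlock = repo.wlock()                 # always wlock first ...
    try:
        lock = repo.lock()               # ... then the store lock
        try:
            yield repo
        finally:
            lock.release()               # release in reverse order
    finally:
        wlock.release()

repo = FakeRepo()
with repo_locked(repo):
    pass
assert repo.log == ['acquire wlock', 'acquire lock',
                    'release lock', 'release wlock']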
1673 def _currentlock(self, lockref):
1676 def _currentlock(self, lockref):
1674 """Returns the lock if it's held, or None if it's not."""
1677 """Returns the lock if it's held, or None if it's not."""
1675 if lockref is None:
1678 if lockref is None:
1676 return None
1679 return None
1677 l = lockref()
1680 l = lockref()
1678 if l is None or not l.held:
1681 if l is None or not l.held:
1679 return None
1682 return None
1680 return l
1683 return l
1681
1684
1682 def currentwlock(self):
1685 def currentwlock(self):
1683 """Returns the wlock if it's held, or None if it's not."""
1686 """Returns the wlock if it's held, or None if it's not."""
1684 return self._currentlock(self._wlockref)
1687 return self._currentlock(self._wlockref)
1685
1688
1686 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1689 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1687 """
1690 """
1688 commit an individual file as part of a larger transaction
1691 commit an individual file as part of a larger transaction
1689 """
1692 """
1690
1693
1691 fname = fctx.path()
1694 fname = fctx.path()
1692 fparent1 = manifest1.get(fname, nullid)
1695 fparent1 = manifest1.get(fname, nullid)
1693 fparent2 = manifest2.get(fname, nullid)
1696 fparent2 = manifest2.get(fname, nullid)
1694 if isinstance(fctx, context.filectx):
1697 if isinstance(fctx, context.filectx):
1695 node = fctx.filenode()
1698 node = fctx.filenode()
1696 if node in [fparent1, fparent2]:
1699 if node in [fparent1, fparent2]:
1697 self.ui.debug('reusing %s filelog entry\n' % fname)
1700 self.ui.debug('reusing %s filelog entry\n' % fname)
1698 if manifest1.flags(fname) != fctx.flags():
1701 if manifest1.flags(fname) != fctx.flags():
1699 changelist.append(fname)
1702 changelist.append(fname)
1700 return node
1703 return node
1701
1704
1702 flog = self.file(fname)
1705 flog = self.file(fname)
1703 meta = {}
1706 meta = {}
1704 copy = fctx.renamed()
1707 copy = fctx.renamed()
1705 if copy and copy[0] != fname:
1708 if copy and copy[0] != fname:
1706 # Mark the new revision of this file as a copy of another
1709 # Mark the new revision of this file as a copy of another
1707 # file. This copy data will effectively act as a parent
1710 # file. This copy data will effectively act as a parent
1708 # of this new revision. If this is a merge, the first
1711 # of this new revision. If this is a merge, the first
1709 # parent will be the nullid (meaning "look up the copy data")
1712 # parent will be the nullid (meaning "look up the copy data")
1710 # and the second one will be the other parent. For example:
1713 # and the second one will be the other parent. For example:
1711 #
1714 #
1712 # 0 --- 1 --- 3 rev1 changes file foo
1715 # 0 --- 1 --- 3 rev1 changes file foo
1713 # \ / rev2 renames foo to bar and changes it
1716 # \ / rev2 renames foo to bar and changes it
1714 # \- 2 -/ rev3 should have bar with all changes and
1717 # \- 2 -/ rev3 should have bar with all changes and
1715 # should record that bar descends from
1718 # should record that bar descends from
1716 # bar in rev2 and foo in rev1
1719 # bar in rev2 and foo in rev1
1717 #
1720 #
1718 # this allows this merge to succeed:
1721 # this allows this merge to succeed:
1719 #
1722 #
1720 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1721 # \ / merging rev3 and rev4 should use bar@rev2
1724 # \ / merging rev3 and rev4 should use bar@rev2
1722 # \- 2 --- 4 as the merge base
1725 # \- 2 --- 4 as the merge base
1723 #
1726 #
1724
1727
1725 cfname = copy[0]
1728 cfname = copy[0]
1726 crev = manifest1.get(cfname)
1729 crev = manifest1.get(cfname)
1727 newfparent = fparent2
1730 newfparent = fparent2
1728
1731
1729 if manifest2: # branch merge
1732 if manifest2: # branch merge
1730 if fparent2 == nullid or crev is None: # copied on remote side
1733 if fparent2 == nullid or crev is None: # copied on remote side
1731 if cfname in manifest2:
1734 if cfname in manifest2:
1732 crev = manifest2[cfname]
1735 crev = manifest2[cfname]
1733 newfparent = fparent1
1736 newfparent = fparent1
1734
1737
1735 # Here, we used to search backwards through history to try to find
1738 # Here, we used to search backwards through history to try to find
1736 # where the file copy came from if the source of a copy was not in
1739 # where the file copy came from if the source of a copy was not in
1737 # the parent directory. However, this doesn't actually make sense to
1740 # the parent directory. However, this doesn't actually make sense to
1738 # do (what does a copy from something not in your working copy even
1741 # do (what does a copy from something not in your working copy even
1739 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1742 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1740 # the user that copy information was dropped, so if they didn't
1743 # the user that copy information was dropped, so if they didn't
1741 # expect this outcome it can be fixed, but this is the correct
1744 # expect this outcome it can be fixed, but this is the correct
1742 # behavior in this circumstance.
1745 # behavior in this circumstance.
1743
1746
1744 if crev:
1747 if crev:
1745 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1748 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1746 meta["copy"] = cfname
1749 meta["copy"] = cfname
1747 meta["copyrev"] = hex(crev)
1750 meta["copyrev"] = hex(crev)
1748 fparent1, fparent2 = nullid, newfparent
1751 fparent1, fparent2 = nullid, newfparent
1749 else:
1752 else:
1750 self.ui.warn(_("warning: can't find ancestor for '%s' "
1753 self.ui.warn(_("warning: can't find ancestor for '%s' "
1751 "copied from '%s'!\n") % (fname, cfname))
1754 "copied from '%s'!\n") % (fname, cfname))
1752
1755
1753 elif fparent1 == nullid:
1756 elif fparent1 == nullid:
1754 fparent1, fparent2 = fparent2, nullid
1757 fparent1, fparent2 = fparent2, nullid
1755 elif fparent2 != nullid:
1758 elif fparent2 != nullid:
1756 # is one parent an ancestor of the other?
1759 # is one parent an ancestor of the other?
1757 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1760 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1758 if fparent1 in fparentancestors:
1761 if fparent1 in fparentancestors:
1759 fparent1, fparent2 = fparent2, nullid
1762 fparent1, fparent2 = fparent2, nullid
1760 elif fparent2 in fparentancestors:
1763 elif fparent2 in fparentancestors:
1761 fparent2 = nullid
1764 fparent2 = nullid
1762
1765
1763 # is the file changed?
1766 # is the file changed?
1764 text = fctx.data()
1767 text = fctx.data()
1765 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1768 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1766 changelist.append(fname)
1769 changelist.append(fname)
1767 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1770 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1768 # are just the flags changed during merge?
1771 # are just the flags changed during merge?
1769 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1772 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1770 changelist.append(fname)
1773 changelist.append(fname)
1771
1774
1772 return fparent1
1775 return fparent1
1773
1776
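The rename handling above boils down to storing the copy source and source revision in the filelog entry's metadata and setting the first parent to nullid so readers follow the copy data instead. A tiny sketch of just that convention (copymeta is a hypothetical helper, not Mercurial's API):

from binascii import hexlify

def copymeta(source, sourcenode):
    # record where the content was copied from; readers that see
    # 'copy' metadata follow it instead of the (null) first parent
    return {'copy': source,
            'copyrev': hexlify(sourcenode).decode('ascii')}

meta = copymeta('foo', b'\x12' * 20)
assert meta['copy'] == 'foo'
assert meta['copyrev'] == '12' * 20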
1774 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1777 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1775 """check for commit arguments that aren't committable"""
1778 """check for commit arguments that aren't committable"""
1776 if match.isexact() or match.prefix():
1779 if match.isexact() or match.prefix():
1777 matched = set(status.modified + status.added + status.removed)
1780 matched = set(status.modified + status.added + status.removed)
1778
1781
1779 for f in match.files():
1782 for f in match.files():
1780 f = self.dirstate.normalize(f)
1783 f = self.dirstate.normalize(f)
1781 if f == '.' or f in matched or f in wctx.substate:
1784 if f == '.' or f in matched or f in wctx.substate:
1782 continue
1785 continue
1783 if f in status.deleted:
1786 if f in status.deleted:
1784 fail(f, _('file not found!'))
1787 fail(f, _('file not found!'))
1785 if f in vdirs: # visited directory
1788 if f in vdirs: # visited directory
1786 d = f + '/'
1789 d = f + '/'
1787 for mf in matched:
1790 for mf in matched:
1788 if mf.startswith(d):
1791 if mf.startswith(d):
1789 break
1792 break
1790 else:
1793 else:
1791 fail(f, _("no match under directory!"))
1794 fail(f, _("no match under directory!"))
1792 elif f not in self.dirstate:
1795 elif f not in self.dirstate:
1793 fail(f, _("file not tracked!"))
1796 fail(f, _("file not tracked!"))
1794
1797
1795 @unfilteredmethod
1798 @unfilteredmethod
1796 def commit(self, text="", user=None, date=None, match=None, force=False,
1799 def commit(self, text="", user=None, date=None, match=None, force=False,
1797 editor=False, extra=None):
1800 editor=False, extra=None):
1798 """Add a new revision to current repository.
1801 """Add a new revision to current repository.
1799
1802
1800 Revision information is gathered from the working directory,
1803 Revision information is gathered from the working directory,
1801 match can be used to filter the committed files. If editor is
1804 match can be used to filter the committed files. If editor is
1802 supplied, it is called to get a commit message.
1805 supplied, it is called to get a commit message.
1803 """
1806 """
1804 if extra is None:
1807 if extra is None:
1805 extra = {}
1808 extra = {}
1806
1809
1807 def fail(f, msg):
1810 def fail(f, msg):
1808 raise error.Abort('%s: %s' % (f, msg))
1811 raise error.Abort('%s: %s' % (f, msg))
1809
1812
1810 if not match:
1813 if not match:
1811 match = matchmod.always(self.root, '')
1814 match = matchmod.always(self.root, '')
1812
1815
1813 if not force:
1816 if not force:
1814 vdirs = []
1817 vdirs = []
1815 match.explicitdir = vdirs.append
1818 match.explicitdir = vdirs.append
1816 match.bad = fail
1819 match.bad = fail
1817
1820
1818 wlock = lock = tr = None
1821 wlock = lock = tr = None
1819 try:
1822 try:
1820 wlock = self.wlock()
1823 wlock = self.wlock()
1821 lock = self.lock() # for recent changelog (see issue4368)
1824 lock = self.lock() # for recent changelog (see issue4368)
1822
1825
1823 wctx = self[None]
1826 wctx = self[None]
1824 merge = len(wctx.parents()) > 1
1827 merge = len(wctx.parents()) > 1
1825
1828
1826 if not force and merge and not match.always():
1829 if not force and merge and not match.always():
1827 raise error.Abort(_('cannot partially commit a merge '
1830 raise error.Abort(_('cannot partially commit a merge '
1828 '(do not specify files or patterns)'))
1831 '(do not specify files or patterns)'))
1829
1832
1830 status = self.status(match=match, clean=force)
1833 status = self.status(match=match, clean=force)
1831 if force:
1834 if force:
1832 status.modified.extend(status.clean) # mq may commit clean files
1835 status.modified.extend(status.clean) # mq may commit clean files
1833
1836
1834 # check subrepos
1837 # check subrepos
1835 subs, commitsubs, newstate = subrepoutil.precommit(
1838 subs, commitsubs, newstate = subrepoutil.precommit(
1836 self.ui, wctx, status, match, force=force)
1839 self.ui, wctx, status, match, force=force)
1837
1840
1838 # make sure all explicit patterns are matched
1841 # make sure all explicit patterns are matched
1839 if not force:
1842 if not force:
1840 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1843 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1841
1844
1842 cctx = context.workingcommitctx(self, status,
1845 cctx = context.workingcommitctx(self, status,
1843 text, user, date, extra)
1846 text, user, date, extra)
1844
1847
1845 # internal config: ui.allowemptycommit
1848 # internal config: ui.allowemptycommit
1846 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1849 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1847 or extra.get('close') or merge or cctx.files()
1850 or extra.get('close') or merge or cctx.files()
1848 or self.ui.configbool('ui', 'allowemptycommit'))
1851 or self.ui.configbool('ui', 'allowemptycommit'))
1849 if not allowemptycommit:
1852 if not allowemptycommit:
1850 return None
1853 return None
1851
1854
1852 if merge and cctx.deleted():
1855 if merge and cctx.deleted():
1853 raise error.Abort(_("cannot commit merge with missing files"))
1856 raise error.Abort(_("cannot commit merge with missing files"))
1854
1857
1855 ms = mergemod.mergestate.read(self)
1858 ms = mergemod.mergestate.read(self)
1856 mergeutil.checkunresolved(ms)
1859 mergeutil.checkunresolved(ms)
1857
1860
1858 if editor:
1861 if editor:
1859 cctx._text = editor(self, cctx, subs)
1862 cctx._text = editor(self, cctx, subs)
1860 edited = (text != cctx._text)
1863 edited = (text != cctx._text)
1861
1864
1862 # Save commit message in case this transaction gets rolled back
1865 # Save commit message in case this transaction gets rolled back
1863 # (e.g. by a pretxncommit hook). Leave the content alone on
1866 # (e.g. by a pretxncommit hook). Leave the content alone on
1864 # the assumption that the user will use the same editor again.
1867 # the assumption that the user will use the same editor again.
1865 msgfn = self.savecommitmessage(cctx._text)
1868 msgfn = self.savecommitmessage(cctx._text)
1866
1869
1867 # commit subs and write new state
1870 # commit subs and write new state
1868 if subs:
1871 if subs:
1869 for s in sorted(commitsubs):
1872 for s in sorted(commitsubs):
1870 sub = wctx.sub(s)
1873 sub = wctx.sub(s)
1871 self.ui.status(_('committing subrepository %s\n') %
1874 self.ui.status(_('committing subrepository %s\n') %
1872 subrepoutil.subrelpath(sub))
1875 subrepoutil.subrelpath(sub))
1873 sr = sub.commit(cctx._text, user, date)
1876 sr = sub.commit(cctx._text, user, date)
1874 newstate[s] = (newstate[s][0], sr)
1877 newstate[s] = (newstate[s][0], sr)
1875 subrepoutil.writestate(self, newstate)
1878 subrepoutil.writestate(self, newstate)
1876
1879
1877 p1, p2 = self.dirstate.parents()
1880 p1, p2 = self.dirstate.parents()
1878 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1881 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1879 try:
1882 try:
1880 self.hook("precommit", throw=True, parent1=hookp1,
1883 self.hook("precommit", throw=True, parent1=hookp1,
1881 parent2=hookp2)
1884 parent2=hookp2)
1882 tr = self.transaction('commit')
1885 tr = self.transaction('commit')
1883 ret = self.commitctx(cctx, True)
1886 ret = self.commitctx(cctx, True)
1884 except: # re-raises
1887 except: # re-raises
1885 if edited:
1888 if edited:
1886 self.ui.write(
1889 self.ui.write(
1887 _('note: commit message saved in %s\n') % msgfn)
1890 _('note: commit message saved in %s\n') % msgfn)
1888 raise
1891 raise
1889 # update bookmarks, dirstate and mergestate
1892 # update bookmarks, dirstate and mergestate
1890 bookmarks.update(self, [p1, p2], ret)
1893 bookmarks.update(self, [p1, p2], ret)
1891 cctx.markcommitted(ret)
1894 cctx.markcommitted(ret)
1892 ms.reset()
1895 ms.reset()
1893 tr.close()
1896 tr.close()
1894
1897
1895 finally:
1898 finally:
1896 lockmod.release(tr, lock, wlock)
1899 lockmod.release(tr, lock, wlock)
1897
1900
1898 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1901 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1899 # hack for commands that use a temporary commit (e.g. histedit):
1902 # hack for commands that use a temporary commit (e.g. histedit):
1900 # the temporary commit may be stripped before the hook runs
1903 # the temporary commit may be stripped before the hook runs
1901 if self.changelog.hasnode(ret):
1904 if self.changelog.hasnode(ret):
1902 self.hook("commit", node=node, parent1=parent1,
1905 self.hook("commit", node=node, parent1=parent1,
1903 parent2=parent2)
1906 parent2=parent2)
1904 self._afterlock(commithook)
1907 self._afterlock(commithook)
1905 return ret
1908 return ret
1906
1909
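commithook() captures node/parent1/parent2 through default arguments, freezing their values at definition time even though the surrounding variables may change afterwards. The trick in miniature:

log = []
ret = 'abc123'

def commithook(node=ret):        # default argument freezes ret's value now
    log.append('commit hook for %s' % node)

ret = 'something else'           # later rebinding does not affect the hook
commithook()
assert log == ['commit hook for abc123']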
1907 @unfilteredmethod
1910 @unfilteredmethod
1908 def commitctx(self, ctx, error=False):
1911 def commitctx(self, ctx, error=False):
1909 """Add a new revision to current repository.
1912 """Add a new revision to current repository.
1910 Revision information is passed via the context argument.
1913 Revision information is passed via the context argument.
1911 """
1914 """
1912
1915
1913 tr = None
1916 tr = None
1914 p1, p2 = ctx.p1(), ctx.p2()
1917 p1, p2 = ctx.p1(), ctx.p2()
1915 user = ctx.user()
1918 user = ctx.user()
1916
1919
1917 lock = self.lock()
1920 lock = self.lock()
1918 try:
1921 try:
1919 tr = self.transaction("commit")
1922 tr = self.transaction("commit")
1920 trp = weakref.proxy(tr)
1923 trp = weakref.proxy(tr)
1921
1924
1922 if ctx.manifestnode():
1925 if ctx.manifestnode():
1923 # reuse an existing manifest revision
1926 # reuse an existing manifest revision
1924 mn = ctx.manifestnode()
1927 mn = ctx.manifestnode()
1925 files = ctx.files()
1928 files = ctx.files()
1926 elif ctx.files():
1929 elif ctx.files():
1927 m1ctx = p1.manifestctx()
1930 m1ctx = p1.manifestctx()
1928 m2ctx = p2.manifestctx()
1931 m2ctx = p2.manifestctx()
1929 mctx = m1ctx.copy()
1932 mctx = m1ctx.copy()
1930
1933
1931 m = mctx.read()
1934 m = mctx.read()
1932 m1 = m1ctx.read()
1935 m1 = m1ctx.read()
1933 m2 = m2ctx.read()
1936 m2 = m2ctx.read()
1934
1937
1935 # check in files
1938 # check in files
1936 added = []
1939 added = []
1937 changed = []
1940 changed = []
1938 removed = list(ctx.removed())
1941 removed = list(ctx.removed())
1939 linkrev = len(self)
1942 linkrev = len(self)
1940 self.ui.note(_("committing files:\n"))
1943 self.ui.note(_("committing files:\n"))
1941 for f in sorted(ctx.modified() + ctx.added()):
1944 for f in sorted(ctx.modified() + ctx.added()):
1942 self.ui.note(f + "\n")
1945 self.ui.note(f + "\n")
1943 try:
1946 try:
1944 fctx = ctx[f]
1947 fctx = ctx[f]
1945 if fctx is None:
1948 if fctx is None:
1946 removed.append(f)
1949 removed.append(f)
1947 else:
1950 else:
1948 added.append(f)
1951 added.append(f)
1949 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1952 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1950 trp, changed)
1953 trp, changed)
1951 m.setflag(f, fctx.flags())
1954 m.setflag(f, fctx.flags())
1952 except OSError as inst:
1955 except OSError as inst:
1953 self.ui.warn(_("trouble committing %s!\n") % f)
1956 self.ui.warn(_("trouble committing %s!\n") % f)
1954 raise
1957 raise
1955 except IOError as inst:
1958 except IOError as inst:
1956 errcode = getattr(inst, 'errno', errno.ENOENT)
1959 errcode = getattr(inst, 'errno', errno.ENOENT)
1957 if error or errcode and errcode != errno.ENOENT:
1960 if error or errcode and errcode != errno.ENOENT:
1958 self.ui.warn(_("trouble committing %s!\n") % f)
1961 self.ui.warn(_("trouble committing %s!\n") % f)
1959 raise
1962 raise
1960
1963
1961 # update manifest
1964 # update manifest
1962 self.ui.note(_("committing manifest\n"))
1965 self.ui.note(_("committing manifest\n"))
1963 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1966 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1964 drop = [f for f in removed if f in m]
1967 drop = [f for f in removed if f in m]
1965 for f in drop:
1968 for f in drop:
1966 del m[f]
1969 del m[f]
1967 mn = mctx.write(trp, linkrev,
1970 mn = mctx.write(trp, linkrev,
1968 p1.manifestnode(), p2.manifestnode(),
1971 p1.manifestnode(), p2.manifestnode(),
1969 added, drop)
1972 added, drop)
1970 files = changed + removed
1973 files = changed + removed
1971 else:
1974 else:
1972 mn = p1.manifestnode()
1975 mn = p1.manifestnode()
1973 files = []
1976 files = []
1974
1977
1975 # update changelog
1978 # update changelog
1976 self.ui.note(_("committing changelog\n"))
1979 self.ui.note(_("committing changelog\n"))
1977 self.changelog.delayupdate(tr)
1980 self.changelog.delayupdate(tr)
1978 n = self.changelog.add(mn, files, ctx.description(),
1981 n = self.changelog.add(mn, files, ctx.description(),
1979 trp, p1.node(), p2.node(),
1982 trp, p1.node(), p2.node(),
1980 user, ctx.date(), ctx.extra().copy())
1983 user, ctx.date(), ctx.extra().copy())
1981 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1984 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1982 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1985 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1983 parent2=xp2)
1986 parent2=xp2)
1984 # set the new commit in its proper phase
1987 # set the new commit in its proper phase
1985 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
1988 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
1986 if targetphase:
1989 if targetphase:
1987 # retract boundary does not alter parent changesets.
1990 # retract boundary does not alter parent changesets.
1988 # if a parent has a higher phase, the resulting phase will
1991 # if a parent has a higher phase, the resulting phase will
1989 # be compliant anyway
1992 # be compliant anyway
1990 #
1993 #
1991 # if minimal phase was 0 we don't need to retract anything
1994 # if minimal phase was 0 we don't need to retract anything
1992 phases.registernew(self, tr, targetphase, [n])
1995 phases.registernew(self, tr, targetphase, [n])
1993 tr.close()
1996 tr.close()
1994 return n
1997 return n
1995 finally:
1998 finally:
1996 if tr:
1999 if tr:
1997 tr.release()
2000 tr.release()
1998 lock.release()
2001 lock.release()
1999
2002
2000 @unfilteredmethod
2003 @unfilteredmethod
2001 def destroying(self):
2004 def destroying(self):
2002 '''Inform the repository that nodes are about to be destroyed.
2005 '''Inform the repository that nodes are about to be destroyed.
2003 Intended for use by strip and rollback, so there's a common
2006 Intended for use by strip and rollback, so there's a common
2004 place for anything that has to be done before destroying history.
2007 place for anything that has to be done before destroying history.
2005
2008
2006 This is mostly useful for saving state that is in memory and waiting
2009 This is mostly useful for saving state that is in memory and waiting
2007 to be flushed when the current lock is released. Because a call to
2010 to be flushed when the current lock is released. Because a call to
2008 destroyed is imminent, the repo will be invalidated, causing those
2011 destroyed is imminent, the repo will be invalidated, causing those
2009 changes either to stay in memory (waiting for the next unlock) or vanish
2012 changes either to stay in memory (waiting for the next unlock) or vanish
2010 completely.
2013 completely.
2011 '''
2014 '''
2012 # When using the same lock to commit and strip, the phasecache is left
2015 # When using the same lock to commit and strip, the phasecache is left
2013 # dirty after committing. Then when we strip, the repo is invalidated,
2016 # dirty after committing. Then when we strip, the repo is invalidated,
2014 # causing those changes to disappear.
2017 # causing those changes to disappear.
2015 if '_phasecache' in vars(self):
2018 if '_phasecache' in vars(self):
2016 self._phasecache.write()
2019 self._phasecache.write()
2017
2020
2018 @unfilteredmethod
2021 @unfilteredmethod
2019 def destroyed(self):
2022 def destroyed(self):
2020 '''Inform the repository that nodes have been destroyed.
2023 '''Inform the repository that nodes have been destroyed.
2021 Intended for use by strip and rollback, so there's a common
2024 Intended for use by strip and rollback, so there's a common
2022 place for anything that has to be done after destroying history.
2025 place for anything that has to be done after destroying history.
2023 '''
2026 '''
2024 # When one tries to:
2027 # When one tries to:
2025 # 1) destroy nodes thus calling this method (e.g. strip)
2028 # 1) destroy nodes thus calling this method (e.g. strip)
2026 # 2) use phasecache somewhere (e.g. commit)
2029 # 2) use phasecache somewhere (e.g. commit)
2027 #
2030 #
2028 # then 2) will fail because the phasecache contains nodes that were
2031 # then 2) will fail because the phasecache contains nodes that were
2029 # removed. We can either remove phasecache from the filecache,
2032 # removed. We can either remove phasecache from the filecache,
2030 # causing it to reload next time it is accessed, or simply filter
2033 # causing it to reload next time it is accessed, or simply filter
2031 # the removed nodes now and write the updated cache.
2034 # the removed nodes now and write the updated cache.
2032 self._phasecache.filterunknown(self)
2035 self._phasecache.filterunknown(self)
2033 self._phasecache.write()
2036 self._phasecache.write()
2034
2037
2035 # refresh all repository caches
2038 # refresh all repository caches
2036 self.updatecaches()
2039 self.updatecaches()
2037
2040
2038 # Ensure the persistent tag cache is updated. Doing it now
2041 # Ensure the persistent tag cache is updated. Doing it now
2039 # means that the tag cache only has to worry about destroyed
2042 # means that the tag cache only has to worry about destroyed
2040 # heads immediately after a strip/rollback. That in turn
2043 # heads immediately after a strip/rollback. That in turn
2041 # guarantees that "cachetip == currenttip" (comparing both rev
2044 # guarantees that "cachetip == currenttip" (comparing both rev
2042 # and node) always means no nodes have been added or destroyed.
2045 # and node) always means no nodes have been added or destroyed.
2043
2046
2044 # XXX this is suboptimal when qrefresh'ing: we strip the current
2047 # XXX this is suboptimal when qrefresh'ing: we strip the current
2045 # head, refresh the tag cache, then immediately add a new head.
2048 # head, refresh the tag cache, then immediately add a new head.
2046 # But I think doing it this way is necessary for the "instant
2049 # But I think doing it this way is necessary for the "instant
2047 # tag cache retrieval" case to work.
2050 # tag cache retrieval" case to work.
2048 self.invalidate()
2051 self.invalidate()
2049
2052
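phasecache.filterunknown() conceptually just drops cache entries whose nodes no longer exist after a strip, so the freshly written cache stays consistent. A standalone sketch of that filtering step, with a plain dict standing in for the cache:

def filterunknown(cache, known):
    # keep only entries whose node survived the strip
    for node in list(cache):
        if node not in known:
            del cache[node]

cache = {'n1': 'draft', 'n2': 'public', 'n3': 'draft'}
filterunknown(cache, known={'n1', 'n2'})
assert cache == {'n1': 'draft', 'n2': 'public'}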
2050 def status(self, node1='.', node2=None, match=None,
2053 def status(self, node1='.', node2=None, match=None,
2051 ignored=False, clean=False, unknown=False,
2054 ignored=False, clean=False, unknown=False,
2052 listsubrepos=False):
2055 listsubrepos=False):
2053 '''a convenience method that calls node1.status(node2)'''
2056 '''a convenience method that calls node1.status(node2)'''
2054 return self[node1].status(node2, match, ignored, clean, unknown,
2057 return self[node1].status(node2, match, ignored, clean, unknown,
2055 listsubrepos)
2058 listsubrepos)
2056
2059
2057 def addpostdsstatus(self, ps):
2060 def addpostdsstatus(self, ps):
2058 """Add a callback to run within the wlock, at the point at which status
2061 """Add a callback to run within the wlock, at the point at which status
2059 fixups happen.
2062 fixups happen.
2060
2063
2061 On status completion, callback(wctx, status) will be called with the
2064 On status completion, callback(wctx, status) will be called with the
2062 wlock held, unless the dirstate has changed from underneath or the wlock
2065 wlock held, unless the dirstate has changed from underneath or the wlock
2063 couldn't be grabbed.
2066 couldn't be grabbed.
2064
2067
2065 Callbacks should not capture and use a cached copy of the dirstate --
2068 Callbacks should not capture and use a cached copy of the dirstate --
2066 it might change in the meanwhile. Instead, they should access the
2069 it might change in the meanwhile. Instead, they should access the
2067 dirstate via wctx.repo().dirstate.
2070 dirstate via wctx.repo().dirstate.
2068
2071
2069 This list is emptied out after each status run -- extensions should
2072 This list is emptied out after each status run -- extensions should
2070 make sure they add to this list each time dirstate.status is called.
2073 make sure they add to this list each time dirstate.status is called.
2071 Extensions should also make sure they don't call this for statuses
2074 Extensions should also make sure they don't call this for statuses
2072 that don't involve the dirstate.
2075 that don't involve the dirstate.
2073 """
2076 """
2074
2077
2075 # The list is located here for uniqueness reasons -- it is actually
2078 # The list is located here for uniqueness reasons -- it is actually
2076 # managed by the workingctx, but that isn't unique per-repo.
2079 # managed by the workingctx, but that isn't unique per-repo.
2077 self._postdsstatus.append(ps)
2080 self._postdsstatus.append(ps)
2078
2081
2079 def postdsstatus(self):
2082 def postdsstatus(self):
2080 """Used by workingctx to get the list of post-dirstate-status hooks."""
2083 """Used by workingctx to get the list of post-dirstate-status hooks."""
2081 return self._postdsstatus
2084 return self._postdsstatus
2082
2085
2083 def clearpostdsstatus(self):
2086 def clearpostdsstatus(self):
2084 """Used by workingctx to clear post-dirstate-status hooks."""
2087 """Used by workingctx to clear post-dirstate-status hooks."""
2085 del self._postdsstatus[:]
2088 del self._postdsstatus[:]
2086
2089
2087 def heads(self, start=None):
2090 def heads(self, start=None):
2088 if start is None:
2091 if start is None:
2089 cl = self.changelog
2092 cl = self.changelog
2090 headrevs = reversed(cl.headrevs())
2093 headrevs = reversed(cl.headrevs())
2091 return [cl.node(rev) for rev in headrevs]
2094 return [cl.node(rev) for rev in headrevs]
2092
2095
2093 heads = self.changelog.heads(start)
2096 heads = self.changelog.heads(start)
2094 # sort the output in rev descending order
2097 # sort the output in rev descending order
2095 return sorted(heads, key=self.changelog.rev, reverse=True)
2098 return sorted(heads, key=self.changelog.rev, reverse=True)
2096
2099
2097 def branchheads(self, branch=None, start=None, closed=False):
2100 def branchheads(self, branch=None, start=None, closed=False):
2098 '''return a (possibly filtered) list of heads for the given branch
2101 '''return a (possibly filtered) list of heads for the given branch
2099
2102
2100 Heads are returned in topological order, from newest to oldest.
2103 Heads are returned in topological order, from newest to oldest.
2101 If branch is None, use the dirstate branch.
2104 If branch is None, use the dirstate branch.
2102 If start is not None, return only heads reachable from start.
2105 If start is not None, return only heads reachable from start.
2103 If closed is True, return heads that are marked as closed as well.
2106 If closed is True, return heads that are marked as closed as well.
2104 '''
2107 '''
2105 if branch is None:
2108 if branch is None:
2106 branch = self[None].branch()
2109 branch = self[None].branch()
2107 branches = self.branchmap()
2110 branches = self.branchmap()
2108 if branch not in branches:
2111 if branch not in branches:
2109 return []
2112 return []
2110 # the cache returns heads ordered lowest to highest
2113 # the cache returns heads ordered lowest to highest
2111 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2114 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2112 if start is not None:
2115 if start is not None:
2113 # filter out the heads that cannot be reached from startrev
2116 # filter out the heads that cannot be reached from startrev
2114 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2117 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2115 bheads = [h for h in bheads if h in fbheads]
2118 bheads = [h for h in bheads if h in fbheads]
2116 return bheads
2119 return bheads
2117
2120
2118 def branches(self, nodes):
2121 def branches(self, nodes):
2119 if not nodes:
2122 if not nodes:
2120 nodes = [self.changelog.tip()]
2123 nodes = [self.changelog.tip()]
2121 b = []
2124 b = []
2122 for n in nodes:
2125 for n in nodes:
2123 t = n
2126 t = n
2124 while True:
2127 while True:
2125 p = self.changelog.parents(n)
2128 p = self.changelog.parents(n)
2126 if p[1] != nullid or p[0] == nullid:
2129 if p[1] != nullid or p[0] == nullid:
2127 b.append((t, n, p[0], p[1]))
2130 b.append((t, n, p[0], p[1]))
2128 break
2131 break
2129 n = p[0]
2132 n = p[0]
2130 return b
2133 return b
2131
2134
2132 def between(self, pairs):
2135 def between(self, pairs):
2133 r = []
2136 r = []
2134
2137
2135 for top, bottom in pairs:
2138 for top, bottom in pairs:
2136 n, l, i = top, [], 0
2139 n, l, i = top, [], 0
2137 f = 1
2140 f = 1
2138
2141
2139 while n != bottom and n != nullid:
2142 while n != bottom and n != nullid:
2140 p = self.changelog.parents(n)[0]
2143 p = self.changelog.parents(n)[0]
2141 if i == f:
2144 if i == f:
2142 l.append(n)
2145 l.append(n)
2143 f = f * 2
2146 f = f * 2
2144 n = p
2147 n = p
2145 i += 1
2148 i += 1
2146
2149
2147 r.append(l)
2150 r.append(l)
2148
2151
2149 return r
2152 return r
2150
2153
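between() walks first parents from 'top' toward 'bottom' and keeps nodes at exponentially growing distances (1, 2, 4, ...), yielding a logarithmic sample of the path. The same loop run over a toy linear history (sample_between is an illustrative reimplementation):

def sample_between(top, bottom, parent):
    n, kept, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        if i == f:                 # keep nodes at distance 1, 2, 4, ...
            kept.append(n)
            f *= 2
        n = parent(n)
        i += 1
    return kept

# linear history 10 -> 9 -> ... -> 0
parent = lambda n: n - 1 if n > 0 else None
assert sample_between(10, 0, parent) == [9, 8, 6, 2]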
2151 def checkpush(self, pushop):
2154 def checkpush(self, pushop):
2152 """Extensions can override this function if additional checks have
2155 """Extensions can override this function if additional checks have
2153 to be performed before pushing, or call it if they override push
2156 to be performed before pushing, or call it if they override push
2154 command.
2157 command.
2155 """
2158 """
2156
2159
2157 @unfilteredpropertycache
2160 @unfilteredpropertycache
2158 def prepushoutgoinghooks(self):
2161 def prepushoutgoinghooks(self):
2159 """Return util.hooks consists of a pushop with repo, remote, outgoing
2162 """Return util.hooks consists of a pushop with repo, remote, outgoing
2160 methods, which are called before pushing changesets.
2163 methods, which are called before pushing changesets.
2161 """
2164 """
2162 return util.hooks()
2165 return util.hooks()
2163
2166
2164 def pushkey(self, namespace, key, old, new):
2167 def pushkey(self, namespace, key, old, new):
2165 try:
2168 try:
2166 tr = self.currenttransaction()
2169 tr = self.currenttransaction()
2167 hookargs = {}
2170 hookargs = {}
2168 if tr is not None:
2171 if tr is not None:
2169 hookargs.update(tr.hookargs)
2172 hookargs.update(tr.hookargs)
2170 hookargs['namespace'] = namespace
2173 hookargs['namespace'] = namespace
2171 hookargs['key'] = key
2174 hookargs['key'] = key
2172 hookargs['old'] = old
2175 hookargs['old'] = old
2173 hookargs['new'] = new
2176 hookargs['new'] = new
2174 self.hook('prepushkey', throw=True, **hookargs)
2177 self.hook('prepushkey', throw=True, **hookargs)
2175 except error.HookAbort as exc:
2178 except error.HookAbort as exc:
2176 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2179 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2177 if exc.hint:
2180 if exc.hint:
2178 self.ui.write_err(_("(%s)\n") % exc.hint)
2181 self.ui.write_err(_("(%s)\n") % exc.hint)
2179 return False
2182 return False
2180 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2183 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2181 ret = pushkey.push(self, namespace, key, old, new)
2184 ret = pushkey.push(self, namespace, key, old, new)
2182 def runhook():
2185 def runhook():
2183 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2186 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2184 ret=ret)
2187 ret=ret)
2185 self._afterlock(runhook)
2188 self._afterlock(runhook)
2186 return ret
2189 return ret
2187
2190
2188 def listkeys(self, namespace):
2191 def listkeys(self, namespace):
2189 self.hook('prelistkeys', throw=True, namespace=namespace)
2192 self.hook('prelistkeys', throw=True, namespace=namespace)
2190 self.ui.debug('listing keys for "%s"\n' % namespace)
2193 self.ui.debug('listing keys for "%s"\n' % namespace)
2191 values = pushkey.list(self, namespace)
2194 values = pushkey.list(self, namespace)
2192 self.hook('listkeys', namespace=namespace, values=values)
2195 self.hook('listkeys', namespace=namespace, values=values)
2193 return values
2196 return values
2194
2197
2195 def debugwireargs(self, one, two, three=None, four=None, five=None):
2198 def debugwireargs(self, one, two, three=None, four=None, five=None):
2196 '''used to test argument passing over the wire'''
2199 '''used to test argument passing over the wire'''
2197 return "%s %s %s %s %s" % (one, two, three, four, five)
2200 return "%s %s %s %s %s" % (one, two, three, four, five)
2198
2201
2199 def savecommitmessage(self, text):
2202 def savecommitmessage(self, text):
2200 fp = self.vfs('last-message.txt', 'wb')
2203 fp = self.vfs('last-message.txt', 'wb')
2201 try:
2204 try:
2202 fp.write(text)
2205 fp.write(text)
2203 finally:
2206 finally:
2204 fp.close()
2207 fp.close()
2205 return self.pathto(fp.name[len(self.root) + 1:])
2208 return self.pathto(fp.name[len(self.root) + 1:])
2206
2209
2207 # used to avoid circular references so destructors work
2210 # used to avoid circular references so destructors work
2208 def aftertrans(files):
2211 def aftertrans(files):
2209 renamefiles = [tuple(t) for t in files]
2212 renamefiles = [tuple(t) for t in files]
2210 def a():
2213 def a():
2211 for vfs, src, dest in renamefiles:
2214 for vfs, src, dest in renamefiles:
2212 # if src and dest refer to the same file, vfs.rename is a no-op,
2215 # if src and dest refer to the same file, vfs.rename is a no-op,
2213 # leaving both src and dest on disk. Delete dest to make sure
2216 # leaving both src and dest on disk. Delete dest to make sure
2214 # the rename couldn't be such a no-op.
2217 # the rename couldn't be such a no-op.
2215 vfs.tryunlink(dest)
2218 vfs.tryunlink(dest)
2216 try:
2219 try:
2217 vfs.rename(src, dest)
2220 vfs.rename(src, dest)
2218 except OSError: # journal file does not yet exist
2221 except OSError: # journal file does not yet exist
2219 pass
2222 pass
2220 return a
2223 return a
2221
2224
2222 def undoname(fn):
2225 def undoname(fn):
2223 base, name = os.path.split(fn)
2226 base, name = os.path.split(fn)
2224 assert name.startswith('journal')
2227 assert name.startswith('journal')
2225 return os.path.join(base, name.replace('journal', 'undo', 1))
2228 return os.path.join(base, name.replace('journal', 'undo', 1))
2226
2229
2227 def instance(ui, path, create):
2230 def instance(ui, path, create):
2228 return localrepository(ui, util.urllocalpath(path), create)
2231 return localrepository(ui, util.urllocalpath(path), create)
2229
2232
2230 def islocal(path):
2233 def islocal(path):
2231 return True
2234 return True
2232
2235
2233 def newreporequirements(repo):
2236 def newreporequirements(repo):
2234 """Determine the set of requirements for a new local repository.
2237 """Determine the set of requirements for a new local repository.
2235
2238
2236 Extensions can wrap this function to specify custom requirements for
2239 Extensions can wrap this function to specify custom requirements for
2237 new repositories.
2240 new repositories.
2238 """
2241 """
2239 ui = repo.ui
2242 ui = repo.ui
2240 requirements = {'revlogv1'}
2243 requirements = {'revlogv1'}
2241 if ui.configbool('format', 'usestore'):
2244 if ui.configbool('format', 'usestore'):
2242 requirements.add('store')
2245 requirements.add('store')
2243 if ui.configbool('format', 'usefncache'):
2246 if ui.configbool('format', 'usefncache'):
2244 requirements.add('fncache')
2247 requirements.add('fncache')
2245 if ui.configbool('format', 'dotencode'):
2248 if ui.configbool('format', 'dotencode'):
2246 requirements.add('dotencode')
2249 requirements.add('dotencode')
2247
2250
2248 compengine = ui.config('experimental', 'format.compression')
2251 compengine = ui.config('experimental', 'format.compression')
2249 if compengine not in util.compengines:
2252 if compengine not in util.compengines:
2250 raise error.Abort(_('compression engine %s defined by '
2253 raise error.Abort(_('compression engine %s defined by '
2251 'experimental.format.compression not available') %
2254 'experimental.format.compression not available') %
2252 compengine,
2255 compengine,
2253 hint=_('run "hg debuginstall" to list available '
2256 hint=_('run "hg debuginstall" to list available '
2254 'compression engines'))
2257 'compression engines'))
2255
2258
2256 # zlib is the historical default and doesn't need an explicit requirement.
2259 # zlib is the historical default and doesn't need an explicit requirement.
2257 if compengine != 'zlib':
2260 if compengine != 'zlib':
2258 requirements.add('exp-compression-%s' % compengine)
2261 requirements.add('exp-compression-%s' % compengine)
2259
2262
2260 if scmutil.gdinitconfig(ui):
2263 if scmutil.gdinitconfig(ui):
2261 requirements.add('generaldelta')
2264 requirements.add('generaldelta')
2262 if ui.configbool('experimental', 'treemanifest'):
2265 if ui.configbool('experimental', 'treemanifest'):
2263 requirements.add('treemanifest')
2266 requirements.add('treemanifest')
2264 if ui.configbool('experimental', 'manifestv2'):
2265 requirements.add('manifestv2')
2266
2267
2267 revlogv2 = ui.config('experimental', 'revlogv2')
2268 revlogv2 = ui.config('experimental', 'revlogv2')
2268 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2269 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2269 requirements.remove('revlogv1')
2270 requirements.remove('revlogv1')
2270 # generaldelta is implied by revlogv2.
2271 # generaldelta is implied by revlogv2.
2271 requirements.discard('generaldelta')
2272 requirements.discard('generaldelta')
2272 requirements.add(REVLOGV2_REQUIREMENT)
2273 requirements.add(REVLOGV2_REQUIREMENT)
2273
2274
2274 return requirements
2275 return requirements
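Note: since the docstring explicitly invites wrapping, here is a minimal sketch of how an extension could add its own requirement now that 'manifestv2' is gone. The requirement name below is hypothetical; extensions.wrapfunction is Mercurial's stock wrapper helper:

    from mercurial import extensions, localrepo

    def _addrequirement(orig, repo):
        # call the original, then tack on a hypothetical custom requirement
        requirements = orig(repo)
        requirements.add('exp-myextension-feature')
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', _addrequirement)
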
@@ -1,1649 +1,1573 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import os
13 import struct
12 import struct
14
13
15 from .i18n import _
14 from .i18n import _
16 from .node import (
15 from .node import (
17 bin,
16 bin,
18 hex,
17 hex,
19 )
18 )
20 from . import (
19 from . import (
21 error,
20 error,
22 mdiff,
21 mdiff,
23 policy,
22 policy,
24 revlog,
23 revlog,
25 util,
24 util,
26 )
25 )
27
26
28 parsers = policy.importmod(r'parsers')
27 parsers = policy.importmod(r'parsers')
29 propertycache = util.propertycache
28 propertycache = util.propertycache
30
29
31 def _parsev1(data):
30 def _parse(data):
32 # This method does a little bit of excessive-looking
31 # This method does a little bit of excessive-looking
33 # precondition checking. This is so that the behavior of this
32 # precondition checking. This is so that the behavior of this
34 # class exactly matches its C counterpart to try and help
33 # class exactly matches its C counterpart to try and help
35 # prevent surprise breakage for anyone that develops against
34 # prevent surprise breakage for anyone that develops against
36 # the pure version.
35 # the pure version.
37 if data and data[-1:] != '\n':
36 if data and data[-1:] != '\n':
38 raise ValueError('Manifest did not end in a newline.')
37 raise ValueError('Manifest did not end in a newline.')
39 prev = None
38 prev = None
40 for l in data.splitlines():
39 for l in data.splitlines():
41 if prev is not None and prev > l:
40 if prev is not None and prev > l:
42 raise ValueError('Manifest lines not in sorted order.')
41 raise ValueError('Manifest lines not in sorted order.')
43 prev = l
42 prev = l
44 f, n = l.split('\0')
43 f, n = l.split('\0')
45 if len(n) > 40:
44 if len(n) > 40:
46 yield f, bin(n[:40]), n[40:]
45 yield f, bin(n[:40]), n[40:]
47 else:
46 else:
48 yield f, bin(n), ''
47 yield f, bin(n), ''
49
48
50 def _parsev2(data):
49 def _text(it):
51 metadataend = data.find('\n')
52 # Just ignore metadata for now
53 pos = metadataend + 1
54 prevf = ''
55 while pos < len(data):
56 end = data.find('\n', pos + 1) # +1 to skip stem length byte
57 if end == -1:
58 raise ValueError('Manifest ended with incomplete file entry.')
59 stemlen = ord(data[pos:pos + 1])
60 items = data[pos + 1:end].split('\0')
61 f = prevf[:stemlen] + items[0]
62 if prevf > f:
63 raise ValueError('Manifest entries not in sorted order.')
64 fl = items[1]
65 # Just ignore metadata (items[2:] for now)
66 n = data[end + 1:end + 21]
67 yield f, n, fl
68 pos = end + 22
69 prevf = f
70
71 def _parse(data):
72 """Generates (path, node, flags) tuples from a manifest text"""
73 if data.startswith('\0'):
74 return iter(_parsev2(data))
75 else:
76 return iter(_parsev1(data))
77
78 def _text(it, usemanifestv2):
79 """Given an iterator over (path, node, flags) tuples, returns a manifest
80 text"""
81 if usemanifestv2:
82 return _textv2(it)
83 else:
84 return _textv1(it)
85
86 def _textv1(it):
87 files = []
50 files = []
88 lines = []
51 lines = []
89 _hex = revlog.hex
52 _hex = revlog.hex
90 for f, n, fl in it:
53 for f, n, fl in it:
91 files.append(f)
54 files.append(f)
92 # if this is changed to support newlines in filenames,
55 # if this is changed to support newlines in filenames,
93 # be sure to check the templates/ dir again (especially *-raw.tmpl)
56 # be sure to check the templates/ dir again (especially *-raw.tmpl)
94 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
57 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
95
58
96 _checkforbidden(files)
59 _checkforbidden(files)
97 return ''.join(lines)
60 return ''.join(lines)
98
61
99 def _textv2(it):
100 files = []
101 lines = ['\0\n']
102 prevf = ''
103 for f, n, fl in it:
104 files.append(f)
105 stem = os.path.commonprefix([prevf, f])
106 stemlen = min(len(stem), 255)
107 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
108 prevf = f
109 _checkforbidden(files)
110 return ''.join(lines)
111
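Note: with _parsev2/_textv2 removed above, the only manifest text format left is the v1 one: each entry is '<path>\0<40 hex digits><flags>\n', sorted by path. A minimal round-trip sketch under Python 2 byte-string semantics, with a made-up node value:

    text = 'foo/bar.txt\x00' + '12' * 20 + 'x\n'   # 'x' marks an executable file
    f, n, fl = next(_parse(text))
    assert (f, n, fl) == ('foo/bar.txt', bin('12' * 20), 'x')
    assert _text(iter([(f, n, fl)])) == text
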
112 class lazymanifestiter(object):
62 class lazymanifestiter(object):
113 def __init__(self, lm):
63 def __init__(self, lm):
114 self.pos = 0
64 self.pos = 0
115 self.lm = lm
65 self.lm = lm
116
66
117 def __iter__(self):
67 def __iter__(self):
118 return self
68 return self
119
69
120 def next(self):
70 def next(self):
121 try:
71 try:
122 data, pos = self.lm._get(self.pos)
72 data, pos = self.lm._get(self.pos)
123 except IndexError:
73 except IndexError:
124 raise StopIteration
74 raise StopIteration
125 if pos == -1:
75 if pos == -1:
126 self.pos += 1
76 self.pos += 1
127 return data[0]
77 return data[0]
128 self.pos += 1
78 self.pos += 1
129 zeropos = data.find('\x00', pos)
79 zeropos = data.find('\x00', pos)
130 return data[pos:zeropos]
80 return data[pos:zeropos]
131
81
132 __next__ = next
82 __next__ = next
133
83
134 class lazymanifestiterentries(object):
84 class lazymanifestiterentries(object):
135 def __init__(self, lm):
85 def __init__(self, lm):
136 self.lm = lm
86 self.lm = lm
137 self.pos = 0
87 self.pos = 0
138
88
139 def __iter__(self):
89 def __iter__(self):
140 return self
90 return self
141
91
142 def next(self):
92 def next(self):
143 try:
93 try:
144 data, pos = self.lm._get(self.pos)
94 data, pos = self.lm._get(self.pos)
145 except IndexError:
95 except IndexError:
146 raise StopIteration
96 raise StopIteration
147 if pos == -1:
97 if pos == -1:
148 self.pos += 1
98 self.pos += 1
149 return data
99 return data
150 zeropos = data.find('\x00', pos)
100 zeropos = data.find('\x00', pos)
151 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
101 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
152 zeropos + 1, 40)
102 zeropos + 1, 40)
153 flags = self.lm._getflags(data, self.pos, zeropos)
103 flags = self.lm._getflags(data, self.pos, zeropos)
154 self.pos += 1
104 self.pos += 1
155 return (data[pos:zeropos], hashval, flags)
105 return (data[pos:zeropos], hashval, flags)
156
106
157 __next__ = next
107 __next__ = next
158
108
159 def unhexlify(data, extra, pos, length):
109 def unhexlify(data, extra, pos, length):
160 s = bin(data[pos:pos + length])
110 s = bin(data[pos:pos + length])
161 if extra:
111 if extra:
162 s += chr(extra & 0xff)
112 s += chr(extra & 0xff)
163 return s
113 return s
164
114
165 def _cmp(a, b):
115 def _cmp(a, b):
166 return (a > b) - (a < b)
116 return (a > b) - (a < b)
167
117
168 class _lazymanifest(object):
118 class _lazymanifest(object):
169 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
119 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
170 if positions is None:
120 if positions is None:
171 self.positions = self.findlines(data)
121 self.positions = self.findlines(data)
172 self.extrainfo = [0] * len(self.positions)
122 self.extrainfo = [0] * len(self.positions)
173 self.data = data
123 self.data = data
174 self.extradata = []
124 self.extradata = []
175 else:
125 else:
176 self.positions = positions[:]
126 self.positions = positions[:]
177 self.extrainfo = extrainfo[:]
127 self.extrainfo = extrainfo[:]
178 self.extradata = extradata[:]
128 self.extradata = extradata[:]
179 self.data = data
129 self.data = data
180
130
181 def findlines(self, data):
131 def findlines(self, data):
182 if not data:
132 if not data:
183 return []
133 return []
184 pos = data.find("\n")
134 pos = data.find("\n")
185 if pos == -1 or data[-1:] != '\n':
135 if pos == -1 or data[-1:] != '\n':
186 raise ValueError("Manifest did not end in a newline.")
136 raise ValueError("Manifest did not end in a newline.")
187 positions = [0]
137 positions = [0]
188 prev = data[:data.find('\x00')]
138 prev = data[:data.find('\x00')]
189 while pos < len(data) - 1 and pos != -1:
139 while pos < len(data) - 1 and pos != -1:
190 positions.append(pos + 1)
140 positions.append(pos + 1)
191 nexts = data[pos + 1:data.find('\x00', pos + 1)]
141 nexts = data[pos + 1:data.find('\x00', pos + 1)]
192 if nexts < prev:
142 if nexts < prev:
193 raise ValueError("Manifest lines not in sorted order.")
143 raise ValueError("Manifest lines not in sorted order.")
194 prev = nexts
144 prev = nexts
195 pos = data.find("\n", pos + 1)
145 pos = data.find("\n", pos + 1)
196 return positions
146 return positions
197
147
198 def _get(self, index):
148 def _get(self, index):
199 # get the position encoded in pos:
149 # get the position encoded in pos:
200 # positive number is an index in 'data'
150 # positive number is an index in 'data'
201 # negative number is in extrapieces
151 # negative number is in extrapieces
202 pos = self.positions[index]
152 pos = self.positions[index]
203 if pos >= 0:
153 if pos >= 0:
204 return self.data, pos
154 return self.data, pos
205 return self.extradata[-pos - 1], -1
155 return self.extradata[-pos - 1], -1
206
156
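Note: positions mixes two index spaces. A non-negative entry is a byte offset into the manifest text, while a negative entry -k refers to extradata[k - 1], which holds a (key, node, flags) tuple staged since the last _compact(). A fabricated illustration of the decoding rule _get applies:

    positions = [0, -1, 12]                   # the middle entry lives in extradata
    extradata = [('b.txt', '\x11' * 20, '')]  # fabricated staged entry
    pos = positions[1]
    assert pos < 0 and extradata[-pos - 1][0] == 'b.txt'
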
207 def _getkey(self, pos):
157 def _getkey(self, pos):
208 if pos >= 0:
158 if pos >= 0:
209 return self.data[pos:self.data.find('\x00', pos + 1)]
159 return self.data[pos:self.data.find('\x00', pos + 1)]
210 return self.extradata[-pos - 1][0]
160 return self.extradata[-pos - 1][0]
211
161
212 def bsearch(self, key):
162 def bsearch(self, key):
213 first = 0
163 first = 0
214 last = len(self.positions) - 1
164 last = len(self.positions) - 1
215
165
216 while first <= last:
166 while first <= last:
217 midpoint = (first + last)//2
167 midpoint = (first + last)//2
218 nextpos = self.positions[midpoint]
168 nextpos = self.positions[midpoint]
219 candidate = self._getkey(nextpos)
169 candidate = self._getkey(nextpos)
220 r = _cmp(key, candidate)
170 r = _cmp(key, candidate)
221 if r == 0:
171 if r == 0:
222 return midpoint
172 return midpoint
223 else:
173 else:
224 if r < 0:
174 if r < 0:
225 last = midpoint - 1
175 last = midpoint - 1
226 else:
176 else:
227 first = midpoint + 1
177 first = midpoint + 1
228 return -1
178 return -1
229
179
230 def bsearch2(self, key):
180 def bsearch2(self, key):
231 # same as the above, but will always return the position
181 # same as the above, but will always return the position
232 # done for performance reasons
182 # done for performance reasons
233 first = 0
183 first = 0
234 last = len(self.positions) - 1
184 last = len(self.positions) - 1
235
185
236 while first <= last:
186 while first <= last:
237 midpoint = (first + last)//2
187 midpoint = (first + last)//2
238 nextpos = self.positions[midpoint]
188 nextpos = self.positions[midpoint]
239 candidate = self._getkey(nextpos)
189 candidate = self._getkey(nextpos)
240 r = _cmp(key, candidate)
190 r = _cmp(key, candidate)
241 if r == 0:
191 if r == 0:
242 return (midpoint, True)
192 return (midpoint, True)
243 else:
193 else:
244 if r < 0:
194 if r < 0:
245 last = midpoint - 1
195 last = midpoint - 1
246 else:
196 else:
247 first = midpoint + 1
197 first = midpoint + 1
248 return (first, False)
198 return (first, False)
249
199
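Note: bsearch2 follows the usual bisect contract, returning (index, True) on a hit and (insertion point, False) on a miss. The same behavior, sketched over a plain sorted key list with the stdlib (an approximation of the real byte-offset search):

    import bisect

    def bsearch2_sketch(keys, key):
        # keys must be sorted; mirrors _lazymanifest.bsearch2's return shape
        i = bisect.bisect_left(keys, key)
        return (i, i < len(keys) and keys[i] == key)

    assert bsearch2_sketch(['a', 'c'], 'c') == (1, True)
    assert bsearch2_sketch(['a', 'c'], 'b') == (1, False)
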
250 def __contains__(self, key):
200 def __contains__(self, key):
251 return self.bsearch(key) != -1
201 return self.bsearch(key) != -1
252
202
253 def _getflags(self, data, needle, pos):
203 def _getflags(self, data, needle, pos):
254 start = pos + 41
204 start = pos + 41
255 end = data.find("\n", start)
205 end = data.find("\n", start)
256 if end == -1:
206 if end == -1:
257 end = len(data) - 1
207 end = len(data) - 1
258 if start == end:
208 if start == end:
259 return ''
209 return ''
260 return self.data[start:end]
210 return self.data[start:end]
261
211
262 def __getitem__(self, key):
212 def __getitem__(self, key):
263 if not isinstance(key, bytes):
213 if not isinstance(key, bytes):
264 raise TypeError("getitem: manifest keys must be a bytes.")
214 raise TypeError("getitem: manifest keys must be a bytes.")
265 needle = self.bsearch(key)
215 needle = self.bsearch(key)
266 if needle == -1:
216 if needle == -1:
267 raise KeyError
217 raise KeyError
268 data, pos = self._get(needle)
218 data, pos = self._get(needle)
269 if pos == -1:
219 if pos == -1:
270 return (data[1], data[2])
220 return (data[1], data[2])
271 zeropos = data.find('\x00', pos)
221 zeropos = data.find('\x00', pos)
272 assert 0 <= needle <= len(self.positions)
222 assert 0 <= needle <= len(self.positions)
273 assert len(self.extrainfo) == len(self.positions)
223 assert len(self.extrainfo) == len(self.positions)
274 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
224 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
275 flags = self._getflags(data, needle, zeropos)
225 flags = self._getflags(data, needle, zeropos)
276 return (hashval, flags)
226 return (hashval, flags)
277
227
278 def __delitem__(self, key):
228 def __delitem__(self, key):
279 needle, found = self.bsearch2(key)
229 needle, found = self.bsearch2(key)
280 if not found:
230 if not found:
281 raise KeyError
231 raise KeyError
282 cur = self.positions[needle]
232 cur = self.positions[needle]
283 self.positions = self.positions[:needle] + self.positions[needle + 1:]
233 self.positions = self.positions[:needle] + self.positions[needle + 1:]
284 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
234 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
285 if cur >= 0:
235 if cur >= 0:
286 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
236 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
287
237
288 def __setitem__(self, key, value):
238 def __setitem__(self, key, value):
289 if not isinstance(key, bytes):
239 if not isinstance(key, bytes):
290 raise TypeError("setitem: manifest keys must be a byte string.")
240 raise TypeError("setitem: manifest keys must be a byte string.")
291 if not isinstance(value, tuple) or len(value) != 2:
241 if not isinstance(value, tuple) or len(value) != 2:
292 raise TypeError("Manifest values must be a tuple of (node, flags).")
242 raise TypeError("Manifest values must be a tuple of (node, flags).")
293 hashval = value[0]
243 hashval = value[0]
294 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
244 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
295 raise TypeError("node must be a 20-byte byte string")
245 raise TypeError("node must be a 20-byte byte string")
296 flags = value[1]
246 flags = value[1]
297 if len(hashval) == 22:
247 if len(hashval) == 22:
298 hashval = hashval[:-1]
248 hashval = hashval[:-1]
299 if not isinstance(flags, bytes) or len(flags) > 1:
249 if not isinstance(flags, bytes) or len(flags) > 1:
300 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
250 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
301 needle, found = self.bsearch2(key)
251 needle, found = self.bsearch2(key)
302 if found:
252 if found:
303 # put the item
253 # put the item
304 pos = self.positions[needle]
254 pos = self.positions[needle]
305 if pos < 0:
255 if pos < 0:
306 self.extradata[-pos - 1] = (key, hashval, value[1])
256 self.extradata[-pos - 1] = (key, hashval, value[1])
307 else:
257 else:
308 # don't bother rewriting 'data' in place; stage the change in extradata
258 # don't bother rewriting 'data' in place; stage the change in extradata
309 self.extradata.append((key, hashval, value[1]))
259 self.extradata.append((key, hashval, value[1]))
310 self.positions[needle] = -len(self.extradata)
260 self.positions[needle] = -len(self.extradata)
311 else:
261 else:
312 # not found, put it in with extra positions
262 # not found, put it in with extra positions
313 self.extradata.append((key, hashval, value[1]))
263 self.extradata.append((key, hashval, value[1]))
314 self.positions = (self.positions[:needle] + [-len(self.extradata)]
264 self.positions = (self.positions[:needle] + [-len(self.extradata)]
315 + self.positions[needle:])
265 + self.positions[needle:])
316 self.extrainfo = (self.extrainfo[:needle] + [0] +
266 self.extrainfo = (self.extrainfo[:needle] + [0] +
317 self.extrainfo[needle:])
267 self.extrainfo[needle:])
318
268
319 def copy(self):
269 def copy(self):
320 # XXX call _compact like in C?
270 # XXX call _compact like in C?
321 return _lazymanifest(self.data, self.positions, self.extrainfo,
271 return _lazymanifest(self.data, self.positions, self.extrainfo,
322 self.extradata)
272 self.extradata)
323
273
324 def _compact(self):
274 def _compact(self):
325 # hopefully not called TOO often
275 # hopefully not called TOO often
326 if len(self.extradata) == 0:
276 if len(self.extradata) == 0:
327 return
277 return
328 l = []
278 l = []
329 last_cut = 0
279 last_cut = 0
330 i = 0
280 i = 0
331 offset = 0
281 offset = 0
332 self.extrainfo = [0] * len(self.positions)
282 self.extrainfo = [0] * len(self.positions)
333 while i < len(self.positions):
283 while i < len(self.positions):
334 if self.positions[i] >= 0:
284 if self.positions[i] >= 0:
335 cur = self.positions[i]
285 cur = self.positions[i]
336 last_cut = cur
286 last_cut = cur
337 while True:
287 while True:
338 self.positions[i] = offset
288 self.positions[i] = offset
339 i += 1
289 i += 1
340 if i == len(self.positions) or self.positions[i] < 0:
290 if i == len(self.positions) or self.positions[i] < 0:
341 break
291 break
342 offset += self.positions[i] - cur
292 offset += self.positions[i] - cur
343 cur = self.positions[i]
293 cur = self.positions[i]
344 end_cut = self.data.find('\n', cur)
294 end_cut = self.data.find('\n', cur)
345 if end_cut != -1:
295 if end_cut != -1:
346 end_cut += 1
296 end_cut += 1
347 offset += end_cut - cur
297 offset += end_cut - cur
348 l.append(self.data[last_cut:end_cut])
298 l.append(self.data[last_cut:end_cut])
349 else:
299 else:
350 while i < len(self.positions) and self.positions[i] < 0:
300 while i < len(self.positions) and self.positions[i] < 0:
351 cur = self.positions[i]
301 cur = self.positions[i]
352 t = self.extradata[-cur - 1]
302 t = self.extradata[-cur - 1]
353 l.append(self._pack(t))
303 l.append(self._pack(t))
354 self.positions[i] = offset
304 self.positions[i] = offset
355 if len(t[1]) > 20:
305 if len(t[1]) > 20:
356 self.extrainfo[i] = ord(t[1][21])
306 self.extrainfo[i] = ord(t[1][21])
357 offset += len(l[-1])
307 offset += len(l[-1])
358 i += 1
308 i += 1
359 self.data = ''.join(l)
309 self.data = ''.join(l)
360 self.extradata = []
310 self.extradata = []
361
311
362 def _pack(self, d):
312 def _pack(self, d):
363 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
313 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
364
314
365 def text(self):
315 def text(self):
366 self._compact()
316 self._compact()
367 return self.data
317 return self.data
368
318
369 def diff(self, m2, clean=False):
319 def diff(self, m2, clean=False):
370 '''Finds changes between the current manifest and m2.'''
320 '''Finds changes between the current manifest and m2.'''
371 # XXX think whether efficiency matters here
321 # XXX think whether efficiency matters here
372 diff = {}
322 diff = {}
373
323
374 for fn, e1, flags in self.iterentries():
324 for fn, e1, flags in self.iterentries():
375 if fn not in m2:
325 if fn not in m2:
376 diff[fn] = (e1, flags), (None, '')
326 diff[fn] = (e1, flags), (None, '')
377 else:
327 else:
378 e2 = m2[fn]
328 e2 = m2[fn]
379 if (e1, flags) != e2:
329 if (e1, flags) != e2:
380 diff[fn] = (e1, flags), e2
330 diff[fn] = (e1, flags), e2
381 elif clean:
331 elif clean:
382 diff[fn] = None
332 diff[fn] = None
383
333
384 for fn, e2, flags in m2.iterentries():
334 for fn, e2, flags in m2.iterentries():
385 if fn not in self:
335 if fn not in self:
386 diff[fn] = (None, ''), (e2, flags)
336 diff[fn] = (None, ''), (e2, flags)
387
337
388 return diff
338 return diff
389
339
390 def iterentries(self):
340 def iterentries(self):
391 return lazymanifestiterentries(self)
341 return lazymanifestiterentries(self)
392
342
393 def iterkeys(self):
343 def iterkeys(self):
394 return lazymanifestiter(self)
344 return lazymanifestiter(self)
395
345
396 def __iter__(self):
346 def __iter__(self):
397 return lazymanifestiter(self)
347 return lazymanifestiter(self)
398
348
399 def __len__(self):
349 def __len__(self):
400 return len(self.positions)
350 return len(self.positions)
401
351
402 def filtercopy(self, filterfn):
352 def filtercopy(self, filterfn):
403 # XXX should be optimized
353 # XXX should be optimized
404 c = _lazymanifest('')
354 c = _lazymanifest('')
405 for f, n, fl in self.iterentries():
355 for f, n, fl in self.iterentries():
406 if filterfn(f):
356 if filterfn(f):
407 c[f] = n, fl
357 c[f] = n, fl
408 return c
358 return c
409
359
410 try:
360 try:
411 _lazymanifest = parsers.lazymanifest
361 _lazymanifest = parsers.lazymanifest
412 except AttributeError:
362 except AttributeError:
413 pass
363 pass
414
364
415 class manifestdict(object):
365 class manifestdict(object):
416 def __init__(self, data=''):
366 def __init__(self, data=''):
417 if data.startswith('\0'):
418 #_lazymanifest can not parse v2
419 self._lm = _lazymanifest('')
420 for f, n, fl in _parsev2(data):
421 self._lm[f] = n, fl
422 else:
423 self._lm = _lazymanifest(data)
367 self._lm = _lazymanifest(data)
424
368
425 def __getitem__(self, key):
369 def __getitem__(self, key):
426 return self._lm[key][0]
370 return self._lm[key][0]
427
371
428 def find(self, key):
372 def find(self, key):
429 return self._lm[key]
373 return self._lm[key]
430
374
431 def __len__(self):
375 def __len__(self):
432 return len(self._lm)
376 return len(self._lm)
433
377
434 def __nonzero__(self):
378 def __nonzero__(self):
435 # nonzero is covered by the __len__ function, but implementing it here
379 # nonzero is covered by the __len__ function, but implementing it here
436 # makes it easier for extensions to override.
380 # makes it easier for extensions to override.
437 return len(self._lm) != 0
381 return len(self._lm) != 0
438
382
439 __bool__ = __nonzero__
383 __bool__ = __nonzero__
440
384
441 def __setitem__(self, key, node):
385 def __setitem__(self, key, node):
442 self._lm[key] = node, self.flags(key, '')
386 self._lm[key] = node, self.flags(key, '')
443
387
444 def __contains__(self, key):
388 def __contains__(self, key):
445 if key is None:
389 if key is None:
446 return False
390 return False
447 return key in self._lm
391 return key in self._lm
448
392
449 def __delitem__(self, key):
393 def __delitem__(self, key):
450 del self._lm[key]
394 del self._lm[key]
451
395
452 def __iter__(self):
396 def __iter__(self):
453 return self._lm.__iter__()
397 return self._lm.__iter__()
454
398
455 def iterkeys(self):
399 def iterkeys(self):
456 return self._lm.iterkeys()
400 return self._lm.iterkeys()
457
401
458 def keys(self):
402 def keys(self):
459 return list(self.iterkeys())
403 return list(self.iterkeys())
460
404
461 def filesnotin(self, m2, match=None):
405 def filesnotin(self, m2, match=None):
462 '''Set of files in this manifest that are not in the other'''
406 '''Set of files in this manifest that are not in the other'''
463 if match:
407 if match:
464 m1 = self.matches(match)
408 m1 = self.matches(match)
465 m2 = m2.matches(match)
409 m2 = m2.matches(match)
466 return m1.filesnotin(m2)
410 return m1.filesnotin(m2)
467 diff = self.diff(m2)
411 diff = self.diff(m2)
468 files = set(filepath
412 files = set(filepath
469 for filepath, hashflags in diff.iteritems()
413 for filepath, hashflags in diff.iteritems()
470 if hashflags[1][0] is None)
414 if hashflags[1][0] is None)
471 return files
415 return files
472
416
473 @propertycache
417 @propertycache
474 def _dirs(self):
418 def _dirs(self):
475 return util.dirs(self)
419 return util.dirs(self)
476
420
477 def dirs(self):
421 def dirs(self):
478 return self._dirs
422 return self._dirs
479
423
480 def hasdir(self, dir):
424 def hasdir(self, dir):
481 return dir in self._dirs
425 return dir in self._dirs
482
426
483 def _filesfastpath(self, match):
427 def _filesfastpath(self, match):
484 '''Checks whether we can correctly and quickly iterate over matcher
428 '''Checks whether we can correctly and quickly iterate over matcher
485 files instead of over manifest files.'''
429 files instead of over manifest files.'''
486 files = match.files()
430 files = match.files()
487 return (len(files) < 100 and (match.isexact() or
431 return (len(files) < 100 and (match.isexact() or
488 (match.prefix() and all(fn in self for fn in files))))
432 (match.prefix() and all(fn in self for fn in files))))
489
433
490 def walk(self, match):
434 def walk(self, match):
491 '''Generates matching file names.
435 '''Generates matching file names.
492
436
493 Equivalent to manifest.matches(match).iterkeys(), but without creating
437 Equivalent to manifest.matches(match).iterkeys(), but without creating
494 an entirely new manifest.
438 an entirely new manifest.
495
439
496 It also reports nonexistent files by marking them bad with match.bad().
440 It also reports nonexistent files by marking them bad with match.bad().
497 '''
441 '''
498 if match.always():
442 if match.always():
499 for f in iter(self):
443 for f in iter(self):
500 yield f
444 yield f
501 return
445 return
502
446
503 fset = set(match.files())
447 fset = set(match.files())
504
448
505 # avoid the entire walk if we're only looking for specific files
449 # avoid the entire walk if we're only looking for specific files
506 if self._filesfastpath(match):
450 if self._filesfastpath(match):
507 for fn in sorted(fset):
451 for fn in sorted(fset):
508 yield fn
452 yield fn
509 return
453 return
510
454
511 for fn in self:
455 for fn in self:
512 if fn in fset:
456 if fn in fset:
513 # specified pattern is the exact name
457 # specified pattern is the exact name
514 fset.remove(fn)
458 fset.remove(fn)
515 if match(fn):
459 if match(fn):
516 yield fn
460 yield fn
517
461
518 # for dirstate.walk, files=['.'] means "walk the whole tree".
462 # for dirstate.walk, files=['.'] means "walk the whole tree".
519 # follow that here, too
463 # follow that here, too
520 fset.discard('.')
464 fset.discard('.')
521
465
522 for fn in sorted(fset):
466 for fn in sorted(fset):
523 if not self.hasdir(fn):
467 if not self.hasdir(fn):
524 match.bad(fn, None)
468 match.bad(fn, None)
525
469
526 def matches(self, match):
470 def matches(self, match):
527 '''generate a new manifest filtered by the match argument'''
471 '''generate a new manifest filtered by the match argument'''
528 if match.always():
472 if match.always():
529 return self.copy()
473 return self.copy()
530
474
531 if self._filesfastpath(match):
475 if self._filesfastpath(match):
532 m = manifestdict()
476 m = manifestdict()
533 lm = self._lm
477 lm = self._lm
534 for fn in match.files():
478 for fn in match.files():
535 if fn in lm:
479 if fn in lm:
536 m._lm[fn] = lm[fn]
480 m._lm[fn] = lm[fn]
537 return m
481 return m
538
482
539 m = manifestdict()
483 m = manifestdict()
540 m._lm = self._lm.filtercopy(match)
484 m._lm = self._lm.filtercopy(match)
541 return m
485 return m
542
486
543 def diff(self, m2, match=None, clean=False):
487 def diff(self, m2, match=None, clean=False):
544 '''Finds changes between the current manifest and m2.
488 '''Finds changes between the current manifest and m2.
545
489
546 Args:
490 Args:
547 m2: the manifest to which this manifest should be compared.
491 m2: the manifest to which this manifest should be compared.
548 clean: if true, include files unchanged between these manifests
492 clean: if true, include files unchanged between these manifests
549 with a None value in the returned dictionary.
493 with a None value in the returned dictionary.
550
494
551 The result is returned as a dict with filename as key and
495 The result is returned as a dict with filename as key and
552 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
496 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
553 nodeid in the current/other manifest and fl1/fl2 is the flag
497 nodeid in the current/other manifest and fl1/fl2 is the flag
554 in the current/other manifest. Where the file does not exist,
498 in the current/other manifest. Where the file does not exist,
555 the nodeid will be None and the flags will be the empty
499 the nodeid will be None and the flags will be the empty
556 string.
500 string.
557 '''
501 '''
558 if match:
502 if match:
559 m1 = self.matches(match)
503 m1 = self.matches(match)
560 m2 = m2.matches(match)
504 m2 = m2.matches(match)
561 return m1.diff(m2, clean=clean)
505 return m1.diff(m2, clean=clean)
562 return self._lm.diff(m2._lm, clean)
506 return self._lm.diff(m2._lm, clean)
563
507
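Note: a minimal runnable sketch of the documented return shape, with fabricated nodes:

    m1 = manifestdict()
    m1['a'] = '\x11' * 20
    m2 = m1.copy()
    m2['a'] = '\x22' * 20
    assert m1.diff(m2) == {'a': (('\x11' * 20, ''), ('\x22' * 20, ''))}
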
564 def setflag(self, key, flag):
508 def setflag(self, key, flag):
565 self._lm[key] = self[key], flag
509 self._lm[key] = self[key], flag
566
510
567 def get(self, key, default=None):
511 def get(self, key, default=None):
568 try:
512 try:
569 return self._lm[key][0]
513 return self._lm[key][0]
570 except KeyError:
514 except KeyError:
571 return default
515 return default
572
516
573 def flags(self, key, default=''):
517 def flags(self, key, default=''):
574 try:
518 try:
575 return self._lm[key][1]
519 return self._lm[key][1]
576 except KeyError:
520 except KeyError:
577 return default
521 return default
578
522
579 def copy(self):
523 def copy(self):
580 c = manifestdict()
524 c = manifestdict()
581 c._lm = self._lm.copy()
525 c._lm = self._lm.copy()
582 return c
526 return c
583
527
584 def items(self):
528 def items(self):
585 return (x[:2] for x in self._lm.iterentries())
529 return (x[:2] for x in self._lm.iterentries())
586
530
587 iteritems = items
531 iteritems = items
588
532
589 def iterentries(self):
533 def iterentries(self):
590 return self._lm.iterentries()
534 return self._lm.iterentries()
591
535
592 def text(self, usemanifestv2=False):
536 def text(self):
593 if usemanifestv2:
537 # most likely uses native version
594 return _textv2(self._lm.iterentries())
595 else:
596 # use (probably) native version for v1
597 return self._lm.text()
538 return self._lm.text()
598
539
599 def fastdelta(self, base, changes):
540 def fastdelta(self, base, changes):
600 """Given a base manifest text as a bytearray and a list of changes
541 """Given a base manifest text as a bytearray and a list of changes
601 relative to that text, compute a delta that can be used by revlog.
542 relative to that text, compute a delta that can be used by revlog.
602 """
543 """
603 delta = []
544 delta = []
604 dstart = None
545 dstart = None
605 dend = None
546 dend = None
606 dline = [""]
547 dline = [""]
607 start = 0
548 start = 0
608 # zero copy representation of base as a buffer
549 # zero copy representation of base as a buffer
609 addbuf = util.buffer(base)
550 addbuf = util.buffer(base)
610
551
611 changes = list(changes)
552 changes = list(changes)
612 if len(changes) < 1000:
553 if len(changes) < 1000:
613 # start with a readonly loop that finds the offset of
554 # start with a readonly loop that finds the offset of
614 # each line and creates the deltas
555 # each line and creates the deltas
615 for f, todelete in changes:
556 for f, todelete in changes:
616 # bs will either be the index of the item or the insert point
557 # bs will either be the index of the item or the insert point
617 start, end = _msearch(addbuf, f, start)
558 start, end = _msearch(addbuf, f, start)
618 if not todelete:
559 if not todelete:
619 h, fl = self._lm[f]
560 h, fl = self._lm[f]
620 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
561 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
621 else:
562 else:
622 if start == end:
563 if start == end:
623 # item we want to delete was not found, error out
564 # item we want to delete was not found, error out
624 raise AssertionError(
565 raise AssertionError(
625 _("failed to remove %s from manifest") % f)
566 _("failed to remove %s from manifest") % f)
626 l = ""
567 l = ""
627 if dstart is not None and dstart <= start and dend >= start:
568 if dstart is not None and dstart <= start and dend >= start:
628 if dend < end:
569 if dend < end:
629 dend = end
570 dend = end
630 if l:
571 if l:
631 dline.append(l)
572 dline.append(l)
632 else:
573 else:
633 if dstart is not None:
574 if dstart is not None:
634 delta.append([dstart, dend, "".join(dline)])
575 delta.append([dstart, dend, "".join(dline)])
635 dstart = start
576 dstart = start
636 dend = end
577 dend = end
637 dline = [l]
578 dline = [l]
638
579
639 if dstart is not None:
580 if dstart is not None:
640 delta.append([dstart, dend, "".join(dline)])
581 delta.append([dstart, dend, "".join(dline)])
641 # apply the delta to the base, and get a delta for addrevision
582 # apply the delta to the base, and get a delta for addrevision
642 deltatext, arraytext = _addlistdelta(base, delta)
583 deltatext, arraytext = _addlistdelta(base, delta)
643 else:
584 else:
644 # For large changes, it's much cheaper to just build the text and
585 # For large changes, it's much cheaper to just build the text and
645 # diff it.
586 # diff it.
646 arraytext = bytearray(self.text())
587 arraytext = bytearray(self.text())
647 deltatext = mdiff.textdiff(
588 deltatext = mdiff.textdiff(
648 util.buffer(base), util.buffer(arraytext))
589 util.buffer(base), util.buffer(arraytext))
649
590
650 return arraytext, deltatext
591 return arraytext, deltatext
651
592
652 def _msearch(m, s, lo=0, hi=None):
593 def _msearch(m, s, lo=0, hi=None):
653 '''return a tuple (start, end) that says where to find s within m.
594 '''return a tuple (start, end) that says where to find s within m.
654
595
655 If the string is found, m[start:end] is the line containing
596 If the string is found, m[start:end] is the line containing
656 that string. If start == end, the string was not found and
597 that string. If start == end, the string was not found and
657 they indicate the proper sorted insertion point.
598 they indicate the proper sorted insertion point.
658
599
659 m should be a buffer, a memoryview or a byte string.
600 m should be a buffer, a memoryview or a byte string.
660 s is a byte string'''
601 s is a byte string'''
661 def advance(i, c):
602 def advance(i, c):
662 while i < lenm and m[i:i + 1] != c:
603 while i < lenm and m[i:i + 1] != c:
663 i += 1
604 i += 1
664 return i
605 return i
665 if not s:
606 if not s:
666 return (lo, lo)
607 return (lo, lo)
667 lenm = len(m)
608 lenm = len(m)
668 if not hi:
609 if not hi:
669 hi = lenm
610 hi = lenm
670 while lo < hi:
611 while lo < hi:
671 mid = (lo + hi) // 2
612 mid = (lo + hi) // 2
672 start = mid
613 start = mid
673 while start > 0 and m[start - 1:start] != '\n':
614 while start > 0 and m[start - 1:start] != '\n':
674 start -= 1
615 start -= 1
675 end = advance(start, '\0')
616 end = advance(start, '\0')
676 if bytes(m[start:end]) < s:
617 if bytes(m[start:end]) < s:
677 # we know that after the null there are 40 bytes of sha1
618 # we know that after the null there are 40 bytes of sha1
678 # this translates to the bisect lo = mid + 1
619 # this translates to the bisect lo = mid + 1
679 lo = advance(end + 40, '\n') + 1
620 lo = advance(end + 40, '\n') + 1
680 else:
621 else:
681 # this translates to the bisect hi = mid
622 # this translates to the bisect hi = mid
682 hi = start
623 hi = start
683 end = advance(lo, '\0')
624 end = advance(lo, '\0')
684 found = m[lo:end]
625 found = m[lo:end]
685 if s == found:
626 if s == found:
686 # we know that after the null there are 40 bytes of sha1
627 # we know that after the null there are 40 bytes of sha1
687 end = advance(end + 40, '\n')
628 end = advance(end + 40, '\n')
688 return (lo, end + 1)
629 return (lo, end + 1)
689 else:
630 else:
690 return (lo, lo)
631 return (lo, lo)
691
632
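Note: a usage sketch against a tiny fabricated manifest text. On a hit, m[start:end] is the full line including its trailing newline; on a miss, start == end marks the sorted insertion point:

    line = 'a.txt\x00' + '0' * 40 + '\n'
    m = line + 'b.txt\x00' + '1' * 40 + '\n'
    assert _msearch(m, 'a.txt') == (0, len(line))
    start, end = _msearch(m, 'aa.txt')
    assert start == end == len(line)   # would insert before 'b.txt'
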
692 def _checkforbidden(l):
633 def _checkforbidden(l):
693 """Check filenames for illegal characters."""
634 """Check filenames for illegal characters."""
694 for f in l:
635 for f in l:
695 if '\n' in f or '\r' in f:
636 if '\n' in f or '\r' in f:
696 raise error.RevlogError(
637 raise error.RevlogError(
697 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
638 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
698
639
699
640
700 # apply the changes collected during the bisect loop to our addlist
641 # apply the changes collected during the bisect loop to our addlist
701 # return a delta suitable for addrevision
642 # return a delta suitable for addrevision
702 def _addlistdelta(addlist, x):
643 def _addlistdelta(addlist, x):
703 # for large addlist arrays, building a new array is cheaper
644 # for large addlist arrays, building a new array is cheaper
704 # than repeatedly modifying the existing one
645 # than repeatedly modifying the existing one
705 currentposition = 0
646 currentposition = 0
706 newaddlist = bytearray()
647 newaddlist = bytearray()
707
648
708 for start, end, content in x:
649 for start, end, content in x:
709 newaddlist += addlist[currentposition:start]
650 newaddlist += addlist[currentposition:start]
710 if content:
651 if content:
711 newaddlist += bytearray(content)
652 newaddlist += bytearray(content)
712
653
713 currentposition = end
654 currentposition = end
714
655
715 newaddlist += addlist[currentposition:]
656 newaddlist += addlist[currentposition:]
716
657
717 deltatext = "".join(struct.pack(">lll", start, end, len(content))
658 deltatext = "".join(struct.pack(">lll", start, end, len(content))
718 + content for start, end, content in x)
659 + content for start, end, content in x)
719 return deltatext, newaddlist
660 return deltatext, newaddlist
720
661
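Note: the deltatext built here is the raw delta format revlog consumes: a big-endian (start, end, length) triple of 32-bit ints followed by the replacement bytes, repeated. A decoding sketch in the same byte-string style:

    import struct

    def iterdelta(deltatext):
        # yield (start, end, content) triples from a packed delta
        pos = 0
        while pos < len(deltatext):
            start, end, length = struct.unpack(">lll", deltatext[pos:pos + 12])
            yield start, end, deltatext[pos + 12:pos + 12 + length]
            pos += 12 + length

    packed = struct.pack(">lll", 0, 5, 3) + 'new'
    assert list(iterdelta(packed)) == [(0, 5, 'new')]
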
721 def _splittopdir(f):
662 def _splittopdir(f):
722 if '/' in f:
663 if '/' in f:
723 dir, subpath = f.split('/', 1)
664 dir, subpath = f.split('/', 1)
724 return dir + '/', subpath
665 return dir + '/', subpath
725 else:
666 else:
726 return '', f
667 return '', f
727
668
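Note: treemanifest walks paths one component at a time, and this helper keeps the trailing slash on the directory key:

    assert _splittopdir('a/b/c.txt') == ('a/', 'b/c.txt')
    assert _splittopdir('top.txt') == ('', 'top.txt')
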
728 _noop = lambda s: None
669 _noop = lambda s: None
729
670
730 class treemanifest(object):
671 class treemanifest(object):
731 def __init__(self, dir='', text=''):
672 def __init__(self, dir='', text=''):
732 self._dir = dir
673 self._dir = dir
733 self._node = revlog.nullid
674 self._node = revlog.nullid
734 self._loadfunc = _noop
675 self._loadfunc = _noop
735 self._copyfunc = _noop
676 self._copyfunc = _noop
736 self._dirty = False
677 self._dirty = False
737 self._dirs = {}
678 self._dirs = {}
738 # Using _lazymanifest here is a little slower than plain old dicts
679 # Using _lazymanifest here is a little slower than plain old dicts
739 self._files = {}
680 self._files = {}
740 self._flags = {}
681 self._flags = {}
741 if text:
682 if text:
742 def readsubtree(subdir, subm):
683 def readsubtree(subdir, subm):
743 raise AssertionError('treemanifest constructor only accepts '
684 raise AssertionError('treemanifest constructor only accepts '
744 'flat manifests')
685 'flat manifests')
745 self.parse(text, readsubtree)
686 self.parse(text, readsubtree)
746 self._dirty = True # Mark flat manifest dirty after parsing
687 self._dirty = True # Mark flat manifest dirty after parsing
747
688
748 def _subpath(self, path):
689 def _subpath(self, path):
749 return self._dir + path
690 return self._dir + path
750
691
751 def __len__(self):
692 def __len__(self):
752 self._load()
693 self._load()
753 size = len(self._files)
694 size = len(self._files)
754 for m in self._dirs.values():
695 for m in self._dirs.values():
755 size += m.__len__()
696 size += m.__len__()
756 return size
697 return size
757
698
758 def __nonzero__(self):
699 def __nonzero__(self):
759 # Faster than "__len__() != 0" since it avoids loading sub-manifests
700 # Faster than "__len__() != 0" since it avoids loading sub-manifests
760 return not self._isempty()
701 return not self._isempty()
761
702
762 __bool__ = __nonzero__
703 __bool__ = __nonzero__
763
704
764 def _isempty(self):
705 def _isempty(self):
765 self._load() # for consistency; already loaded by all callers
706 self._load() # for consistency; already loaded by all callers
766 return (not self._files and (not self._dirs or
707 return (not self._files and (not self._dirs or
767 all(m._isempty() for m in self._dirs.values())))
708 all(m._isempty() for m in self._dirs.values())))
768
709
769 def __repr__(self):
710 def __repr__(self):
770 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
711 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
771 (self._dir, revlog.hex(self._node),
712 (self._dir, revlog.hex(self._node),
772 bool(self._loadfunc is _noop),
713 bool(self._loadfunc is _noop),
773 self._dirty, id(self)))
714 self._dirty, id(self)))
774
715
775 def dir(self):
716 def dir(self):
776 '''The directory that this tree manifest represents, including a
717 '''The directory that this tree manifest represents, including a
777 trailing '/'. Empty string for the repo root directory.'''
718 trailing '/'. Empty string for the repo root directory.'''
778 return self._dir
719 return self._dir
779
720
780 def node(self):
721 def node(self):
781 '''The node of this instance; nullid for unsaved instances. Should
722 '''The node of this instance; nullid for unsaved instances. Should
782 be updated when the instance is read from or written to a revlog.
723 be updated when the instance is read from or written to a revlog.
783 '''
724 '''
784 assert not self._dirty
725 assert not self._dirty
785 return self._node
726 return self._node
786
727
787 def setnode(self, node):
728 def setnode(self, node):
788 self._node = node
729 self._node = node
789 self._dirty = False
730 self._dirty = False
790
731
791 def iterentries(self):
732 def iterentries(self):
792 self._load()
733 self._load()
793 for p, n in sorted(itertools.chain(self._dirs.items(),
734 for p, n in sorted(itertools.chain(self._dirs.items(),
794 self._files.items())):
735 self._files.items())):
795 if p in self._files:
736 if p in self._files:
796 yield self._subpath(p), n, self._flags.get(p, '')
737 yield self._subpath(p), n, self._flags.get(p, '')
797 else:
738 else:
798 for x in n.iterentries():
739 for x in n.iterentries():
799 yield x
740 yield x
800
741
801 def items(self):
742 def items(self):
802 self._load()
743 self._load()
803 for p, n in sorted(itertools.chain(self._dirs.items(),
744 for p, n in sorted(itertools.chain(self._dirs.items(),
804 self._files.items())):
745 self._files.items())):
805 if p in self._files:
746 if p in self._files:
806 yield self._subpath(p), n
747 yield self._subpath(p), n
807 else:
748 else:
808 for f, sn in n.iteritems():
749 for f, sn in n.iteritems():
809 yield f, sn
750 yield f, sn
810
751
811 iteritems = items
752 iteritems = items
812
753
813 def iterkeys(self):
754 def iterkeys(self):
814 self._load()
755 self._load()
815 for p in sorted(itertools.chain(self._dirs, self._files)):
756 for p in sorted(itertools.chain(self._dirs, self._files)):
816 if p in self._files:
757 if p in self._files:
817 yield self._subpath(p)
758 yield self._subpath(p)
818 else:
759 else:
819 for f in self._dirs[p]:
760 for f in self._dirs[p]:
820 yield f
761 yield f
821
762
822 def keys(self):
763 def keys(self):
823 return list(self.iterkeys())
764 return list(self.iterkeys())
824
765
825 def __iter__(self):
766 def __iter__(self):
826 return self.iterkeys()
767 return self.iterkeys()
827
768
828 def __contains__(self, f):
769 def __contains__(self, f):
829 if f is None:
770 if f is None:
830 return False
771 return False
831 self._load()
772 self._load()
832 dir, subpath = _splittopdir(f)
773 dir, subpath = _splittopdir(f)
833 if dir:
774 if dir:
834 if dir not in self._dirs:
775 if dir not in self._dirs:
835 return False
776 return False
836 return self._dirs[dir].__contains__(subpath)
777 return self._dirs[dir].__contains__(subpath)
837 else:
778 else:
838 return f in self._files
779 return f in self._files
839
780
840 def get(self, f, default=None):
781 def get(self, f, default=None):
841 self._load()
782 self._load()
842 dir, subpath = _splittopdir(f)
783 dir, subpath = _splittopdir(f)
843 if dir:
784 if dir:
844 if dir not in self._dirs:
785 if dir not in self._dirs:
845 return default
786 return default
846 return self._dirs[dir].get(subpath, default)
787 return self._dirs[dir].get(subpath, default)
847 else:
788 else:
848 return self._files.get(f, default)
789 return self._files.get(f, default)
849
790
850 def __getitem__(self, f):
791 def __getitem__(self, f):
851 self._load()
792 self._load()
852 dir, subpath = _splittopdir(f)
793 dir, subpath = _splittopdir(f)
853 if dir:
794 if dir:
854 return self._dirs[dir].__getitem__(subpath)
795 return self._dirs[dir].__getitem__(subpath)
855 else:
796 else:
856 return self._files[f]
797 return self._files[f]
857
798
858 def flags(self, f):
799 def flags(self, f):
859 self._load()
800 self._load()
860 dir, subpath = _splittopdir(f)
801 dir, subpath = _splittopdir(f)
861 if dir:
802 if dir:
862 if dir not in self._dirs:
803 if dir not in self._dirs:
863 return ''
804 return ''
864 return self._dirs[dir].flags(subpath)
805 return self._dirs[dir].flags(subpath)
865 else:
806 else:
866 if f in self._dirs:
807 if f in self._dirs:
867 return ''
808 return ''
868 return self._flags.get(f, '')
809 return self._flags.get(f, '')
869
810
870 def find(self, f):
811 def find(self, f):
871 self._load()
812 self._load()
872 dir, subpath = _splittopdir(f)
813 dir, subpath = _splittopdir(f)
873 if dir:
814 if dir:
874 return self._dirs[dir].find(subpath)
815 return self._dirs[dir].find(subpath)
875 else:
816 else:
876 return self._files[f], self._flags.get(f, '')
817 return self._files[f], self._flags.get(f, '')
877
818
878 def __delitem__(self, f):
819 def __delitem__(self, f):
879 self._load()
820 self._load()
880 dir, subpath = _splittopdir(f)
821 dir, subpath = _splittopdir(f)
881 if dir:
822 if dir:
882 self._dirs[dir].__delitem__(subpath)
823 self._dirs[dir].__delitem__(subpath)
883 # If the directory is now empty, remove it
824 # If the directory is now empty, remove it
884 if self._dirs[dir]._isempty():
825 if self._dirs[dir]._isempty():
885 del self._dirs[dir]
826 del self._dirs[dir]
886 else:
827 else:
887 del self._files[f]
828 del self._files[f]
888 if f in self._flags:
829 if f in self._flags:
889 del self._flags[f]
830 del self._flags[f]
890 self._dirty = True
831 self._dirty = True
891
832
892 def __setitem__(self, f, n):
833 def __setitem__(self, f, n):
893 assert n is not None
834 assert n is not None
894 self._load()
835 self._load()
895 dir, subpath = _splittopdir(f)
836 dir, subpath = _splittopdir(f)
896 if dir:
837 if dir:
897 if dir not in self._dirs:
838 if dir not in self._dirs:
898 self._dirs[dir] = treemanifest(self._subpath(dir))
839 self._dirs[dir] = treemanifest(self._subpath(dir))
899 self._dirs[dir].__setitem__(subpath, n)
840 self._dirs[dir].__setitem__(subpath, n)
900 else:
841 else:
901 self._files[f] = n[:21] # to match manifestdict's behavior
842 self._files[f] = n[:21] # to match manifestdict's behavior
902 self._dirty = True
843 self._dirty = True
903
844
904 def _load(self):
845 def _load(self):
905 if self._loadfunc is not _noop:
846 if self._loadfunc is not _noop:
906 lf, self._loadfunc = self._loadfunc, _noop
847 lf, self._loadfunc = self._loadfunc, _noop
907 lf(self)
848 lf(self)
908 elif self._copyfunc is not _noop:
849 elif self._copyfunc is not _noop:
909 cf, self._copyfunc = self._copyfunc, _noop
850 cf, self._copyfunc = self._copyfunc, _noop
910 cf(self)
851 cf(self)
911
852
912 def setflag(self, f, flags):
853 def setflag(self, f, flags):
913 """Set the flags (symlink, executable) for path f."""
854 """Set the flags (symlink, executable) for path f."""
914 self._load()
855 self._load()
915 dir, subpath = _splittopdir(f)
856 dir, subpath = _splittopdir(f)
916 if dir:
857 if dir:
917 if dir not in self._dirs:
858 if dir not in self._dirs:
918 self._dirs[dir] = treemanifest(self._subpath(dir))
859 self._dirs[dir] = treemanifest(self._subpath(dir))
919 self._dirs[dir].setflag(subpath, flags)
860 self._dirs[dir].setflag(subpath, flags)
920 else:
861 else:
921 self._flags[f] = flags
862 self._flags[f] = flags
922 self._dirty = True
863 self._dirty = True
923
864
924 def copy(self):
865 def copy(self):
925 copy = treemanifest(self._dir)
866 copy = treemanifest(self._dir)
926 copy._node = self._node
867 copy._node = self._node
927 copy._dirty = self._dirty
868 copy._dirty = self._dirty
928 if self._copyfunc is _noop:
869 if self._copyfunc is _noop:
929 def _copyfunc(s):
870 def _copyfunc(s):
930 self._load()
871 self._load()
931 for d in self._dirs:
872 for d in self._dirs:
932 s._dirs[d] = self._dirs[d].copy()
873 s._dirs[d] = self._dirs[d].copy()
933 s._files = dict.copy(self._files)
874 s._files = dict.copy(self._files)
934 s._flags = dict.copy(self._flags)
875 s._flags = dict.copy(self._flags)
935 if self._loadfunc is _noop:
876 if self._loadfunc is _noop:
936 _copyfunc(copy)
877 _copyfunc(copy)
937 else:
878 else:
938 copy._copyfunc = _copyfunc
879 copy._copyfunc = _copyfunc
939 else:
880 else:
940 copy._copyfunc = self._copyfunc
881 copy._copyfunc = self._copyfunc
941 return copy
882 return copy
942
883
943 def filesnotin(self, m2, match=None):
884 def filesnotin(self, m2, match=None):
944 '''Set of files in this manifest that are not in the other'''
885 '''Set of files in this manifest that are not in the other'''
945 if match:
886 if match:
946 m1 = self.matches(match)
887 m1 = self.matches(match)
947 m2 = m2.matches(match)
888 m2 = m2.matches(match)
948 return m1.filesnotin(m2)
889 return m1.filesnotin(m2)
949
890
950 files = set()
891 files = set()
951 def _filesnotin(t1, t2):
892 def _filesnotin(t1, t2):
952 if t1._node == t2._node and not t1._dirty and not t2._dirty:
893 if t1._node == t2._node and not t1._dirty and not t2._dirty:
953 return
894 return
954 t1._load()
895 t1._load()
955 t2._load()
896 t2._load()
956 for d, m1 in t1._dirs.iteritems():
897 for d, m1 in t1._dirs.iteritems():
957 if d in t2._dirs:
898 if d in t2._dirs:
958 m2 = t2._dirs[d]
899 m2 = t2._dirs[d]
959 _filesnotin(m1, m2)
900 _filesnotin(m1, m2)
960 else:
901 else:
961 files.update(m1.iterkeys())
902 files.update(m1.iterkeys())
962
903
963 for fn in t1._files:
904 for fn in t1._files:
964 if fn not in t2._files:
905 if fn not in t2._files:
965 files.add(t1._subpath(fn))
906 files.add(t1._subpath(fn))
966
907
967 _filesnotin(self, m2)
908 _filesnotin(self, m2)
968 return files
909 return files
969
910
970 @propertycache
911 @propertycache
971 def _alldirs(self):
912 def _alldirs(self):
972 return util.dirs(self)
913 return util.dirs(self)
973
914
974 def dirs(self):
915 def dirs(self):
975 return self._alldirs
916 return self._alldirs
976
917
977 def hasdir(self, dir):
918 def hasdir(self, dir):
978 self._load()
919 self._load()
979 topdir, subdir = _splittopdir(dir)
920 topdir, subdir = _splittopdir(dir)
980 if topdir:
921 if topdir:
981 if topdir in self._dirs:
922 if topdir in self._dirs:
982 return self._dirs[topdir].hasdir(subdir)
923 return self._dirs[topdir].hasdir(subdir)
983 return False
924 return False
984 return (dir + '/') in self._dirs
925 return (dir + '/') in self._dirs
985
926
986 def walk(self, match):
927 def walk(self, match):
987 '''Generates matching file names.
928 '''Generates matching file names.
988
929
989 Equivalent to manifest.matches(match).iterkeys(), but without creating
930 Equivalent to manifest.matches(match).iterkeys(), but without creating
990 an entirely new manifest.
931 an entirely new manifest.
991
932
992 It also reports nonexistent files by marking them bad with match.bad().
933 It also reports nonexistent files by marking them bad with match.bad().
993 '''
934 '''
994 if match.always():
935 if match.always():
995 for f in iter(self):
936 for f in iter(self):
996 yield f
937 yield f
997 return
938 return
998
939
        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        if not match.visitdir(self._dir[:-1] or '.'):
            return

        # yield this dir's files and walk its submanifests
        self._load()
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                for f in self._dirs[p]._walk(match):
                    yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
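
# A minimal runnable sketch of the diff() contract above, using the flat
# manifestdict (which shares the same contract); the nodeids are fabricated
# and not part of this change:
#
#   >>> n1, n2, n3 = '\x11' * 20, '\x22' * 20, '\x33' * 20
#   >>> m1 = manifestdict(); m1['a'] = n1
#   >>> m2 = manifestdict(); m2['a'] = n2; m2.setflag('a', 'x'); m2['b'] = n3
#   >>> m1.diff(m2) == {'a': ((n1, ''), (n2, 'x')),
#   ...                 'b': ((None, ''), (n3, ''))}
#   True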

    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

-    def text(self, usemanifestv2=False):
+    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
-        return _text(self.iterentries(), usemanifestv2)
+        return _text(self.iterentries())

-    def dirtext(self, usemanifestv2=False):
+    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
-        return _text(sorted(dirs + files), usemanifestv2)
+        return _text(sorted(dirs + files))
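
# Both text() and dirtext() bottom out in _text(), which after this cleanup
# only ever emits the v1 encoding: one line per entry, roughly (a sketch,
# not the literal implementation):
#
#   >>> from binascii import hexlify
#   >>> '%s\0%s%s\n' % ('foo/bar.txt', hexlify('\x11' * 20), 'x')
#   'foo/bar.txt\x001111111111111111111111111111111111111111x\n'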
1155
1096
1156 def read(self, gettext, readsubtree):
1097 def read(self, gettext, readsubtree):
1157 def _load_for_read(s):
1098 def _load_for_read(s):
1158 s.parse(gettext(), readsubtree)
1099 s.parse(gettext(), readsubtree)
1159 s._dirty = False
1100 s._dirty = False
1160 self._loadfunc = _load_for_read
1101 self._loadfunc = _load_for_read
1161
1102
1162 def writesubtrees(self, m1, m2, writesubtree):
1103 def writesubtrees(self, m1, m2, writesubtree):
1163 self._load() # for consistency; should never have any effect here
1104 self._load() # for consistency; should never have any effect here
1164 m1._load()
1105 m1._load()
1165 m2._load()
1106 m2._load()
1166 emptytree = treemanifest()
1107 emptytree = treemanifest()
1167 for d, subm in self._dirs.iteritems():
1108 for d, subm in self._dirs.iteritems():
1168 subp1 = m1._dirs.get(d, emptytree)._node
1109 subp1 = m1._dirs.get(d, emptytree)._node
1169 subp2 = m2._dirs.get(d, emptytree)._node
1110 subp2 = m2._dirs.get(d, emptytree)._node
1170 if subp1 == revlog.nullid:
1111 if subp1 == revlog.nullid:
1171 subp1, subp2 = subp2, subp1
1112 subp1, subp2 = subp2, subp1
1172 writesubtree(subm, subp1, subp2)
1113 writesubtree(subm, subp1, subp2)
1173
1114
1174 def walksubtrees(self, matcher=None):
1115 def walksubtrees(self, matcher=None):
1175 """Returns an iterator of the subtrees of this manifest, including this
1116 """Returns an iterator of the subtrees of this manifest, including this
1176 manifest itself.
1117 manifest itself.
1177
1118
1178 If `matcher` is provided, it only returns subtrees that match.
1119 If `matcher` is provided, it only returns subtrees that match.
1179 """
1120 """
1180 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1121 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1181 return
1122 return
1182 if not matcher or matcher(self._dir[:-1]):
1123 if not matcher or matcher(self._dir[:-1]):
1183 yield self
1124 yield self
1184
1125
1185 self._load()
1126 self._load()
1186 for d, subm in self._dirs.iteritems():
1127 for d, subm in self._dirs.iteritems():
1187 for subtree in subm.walksubtrees(matcher=matcher):
1128 for subtree in subm.walksubtrees(matcher=matcher):
1188 yield subtree
1129 yield subtree
1189
1130
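# Usage sketch (assuming an already-loaded treemanifest `tm`): walksubtrees()
# yields the root first, then each nested subtree, so collecting every
# subtree path is simply:
#
#   >>> [t.dir() for t in tm.walksubtrees()]   # e.g. ['', 'lib/', 'lib/x/']
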
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
-        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)
-            usemanifestv2 = opts.get('manifestv2', usemanifestv2)

        self._treeondisk = optiontreemanifest or treemanifest
-        self._usemanifestv2 = usemanifestv2

        self._fulltextcache = util.lrucachedict(cachesize)

        if dir:
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'

        if indexfile is None:
            indexfile = '00manifest.i'
            if dir:
                indexfile = "meta/" + dir + indexfile

        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        super(manifestrevlog, self).__init__(opener, indexfile,
                                             # only root indexfile is cached
                                             checkambig=not bool(dir),
                                             mmaplargeindex=True)

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self):
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
-        if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
-            and not self._usemanifestv2):
+        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                m1 = readtree(self._dir, p1)
                m2 = readtree(self._dir, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree)
                arraytext = None
            else:
-                text = m.text(self._usemanifestv2)
+                text = m.text()
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

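# The `work` iterator in the fast path above merges the added and removed
# names into one name-ordered stream, tagging removals; a runnable sketch
# with hypothetical filenames:
#
#   >>> import heapq
#   >>> added, removed = ['a.txt', 'c.txt'], ['b.txt']
#   >>> list(heapq.merge([(x, False) for x in added],
#   ...                  [(x, True) for x in removed]))
#   [('a.txt', False), ('b.txt', True), ('c.txt', False)]
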
    def _addtree(self, m, transaction, link, m1, m2, readtree):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self._dir != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree)
        m.writesubtrees(m1, m2, writesubtree)
-        text = m.dirtext(self._usemanifestv2)
+        text = m.dirtext()
        n = None
        if self._dir != '':
            # Double-check whether contents are unchanged to one parent
-            if text == m1.dirtext(self._usemanifestv2):
+            if text == m1.dirtext():
                n = m1.node()
-            elif text == m2.dirtext(self._usemanifestv2):
+            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)
        self._treeinmem = usetreemanifest

        self._revlog = repo._constructmanifest()

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self.cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, dir, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(dir, ()):
            return self._dirmancache[dir][node]

        if dir:
            if self._revlog._treeondisk:
                if verify:
                    dirlog = self._revlog.dirlog(dir)
                    if node not in dirlog.nodemap:
                        raise LookupError(node, dirlog.indexfile,
                                          _('no node'))
                m = treemanifestctx(self, dir, node)
            else:
                raise error.Abort(
                    _("cannot ask for manifest directory '%s' in a flat "
                      "manifest") % dir)
        else:
            if verify:
                if node not in self._revlog.nodemap:
                    raise LookupError(node, self._revlog.indexfile,
                                      _('no node'))
            if self._treeinmem:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        if node != revlog.nullid:
            mancache = self._dirmancache.get(dir)
            if not mancache:
                mancache = util.lrucachedict(self.cachesize)
                self._dirmancache[dir] = mancache
            mancache[node] = m
        return m
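
# Usage sketch (assuming an existing `repo`; the node is hypothetical):
#
#   >>> mfl = repo.manifestlog
#   >>> mctx = mfl[somenode]            # same as mfl.get('', somenode)
#   >>> files = list(mctx.read())       # filenames in that snapshot
#
# Repeated lookups of the same node are then served from the per-directory
# LRU cache populated above.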

    def clearcaches(self):
        self._dirmancache.clear()
        self._revlog.clearcaches()

class memmanifestctx(object):
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _revlog(self):
        return self._manifestlog._revlog

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed):
        return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
                                  added, removed)

class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        return self._manifestlog._revlog

    def node(self):
        return self._node

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._revlog().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                rl = self._revlog()
                text = rl.revision(self._node)
                arraytext = bytearray(text)
                rl._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        revlog = self._revlog()
-        if revlog._usemanifestv2:
-            # Need to perform a slow delta
-            r0 = revlog.deltaparent(revlog.rev(self._node))
-            m0 = self._manifestlog[revlog.node(r0)].read()
-            m1 = self.read()
-            md = manifestdict()
-            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
-                if n1:
-                    md[f] = n1
-                    if fl1:
-                        md.setflag(f, fl1)
-            return md
-
        r = revlog.rev(self._node)
        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)

class memtreemanifestctx(object):
    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _revlog(self):
        return self._manifestlog._revlog

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()
        return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
                                  added, removed, readtree=readtree)

class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        return self._manifestlog._revlog.dirlog(self._dir)

    def read(self):
        if self._data is None:
            rl = self._revlog()
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif rl._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return rl.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                text = rl.revision(self._node)
                arraytext = bytearray(text)
                rl.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._revlog().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        revlog = self._revlog()
-        if shallow and not revlog._usemanifestv2:
+        if shallow:
            r = revlog.rev(self._node)
            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = self._manifestlog.get(self._dir, revlog.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md
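
# Shape sketch for the two modes above (names hypothetical, assuming a
# treemanifestctx `tmctx` whose delta touches files under lib/):
#
#   >>> shallowmd = tmctx.readdelta(shallow=True)
#   >>> shallowmd.flags('lib')          # subtree reported inline, not recursed
#   't'
#   >>> deepmd = tmctx.readdelta()      # recurses; plain file entries instead
#   >>> 'lib/util.py' in deepmd
#   True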

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if (deltaparent != revlog.nullrev and
            deltaparent in rl.parentrevs(r)):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(rl.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)
@@ -1,867 +1,866 @@
# upgrade.py - functions for in place upgrade of Mercurial repository
#
# Copyright (c) 2016-present, Gregory Szorc
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat
import tempfile

from .i18n import _
from . import (
    changelog,
    error,
    filelog,
    hg,
    localrepo,
    manifest,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return {
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    }

def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
-        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }
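
# Sketch of how these two gates combine (the requirement set here is
# hypothetical; a real one is read from the repo's .hg/requires):
#
#   reqs = {'revlogv1', 'store', 'fncache', 'shared'}
#   missing = requiredsourcerequirements(repo) - reqs   # empty -> may proceed
#   blockers = reqs & blocksourcerequirements(repo)     # {'shared'} -> refuse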

def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    }

def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return {
        'dotencode',
        'fncache',
        'generaldelta',
    }

def preservedrequirements(repo):
    return set()

deficiency = 'deficiency'
optimisation = 'optimization'

class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # This is what Python tells us to do
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.name)

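# Equality and hashing key on `name` alone, so two improvement objects
# describing the same change deduplicate in sets; a quick sketch:
#
#   >>> a = improvement('generaldelta', deficiency, 'one', 'msg one')
#   >>> b = improvement('generaldelta', optimisation, 'two', 'msg two')
#   >>> a == b, len({a, b})
#   (True, 1)
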
allformatvariant = []

def registerformatvariant(cls):
    allformatvariant.append(cls)
    return cls

class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it; for ``deficiency`` types, should be
    # worded in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()

class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variants are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that controls this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(repo):
        return localrepo.newreporequirements(repo)

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo)

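# Declaring a requirement-backed variant is then a subclass plus the
# decorator; a hypothetical sketch ('examplereq' is not a real requirement):
#
#   @registerformatvariant
#   class examplereq(requirementformatvariant):
#       name = 'examplereq'
#       _requirement = 'examplereq'   # hypothetical requirement name
#       default = False
#       description = _('repository lacks the hypothetical examplereq format')
#       upgrademessage = _('repository will adopt the hypothetical '
#                          'examplereq format')
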
211 @registerformatvariant
210 @registerformatvariant
212 class fncache(requirementformatvariant):
211 class fncache(requirementformatvariant):
213 name = 'fncache'
212 name = 'fncache'
214
213
215 _requirement = 'fncache'
214 _requirement = 'fncache'
216
215
217 default = True
216 default = True
218
217
219 description = _('long and reserved filenames may not work correctly; '
218 description = _('long and reserved filenames may not work correctly; '
220 'repository performance is sub-optimal')
219 'repository performance is sub-optimal')
221
220
222 upgrademessage = _('repository will be more resilient to storing '
221 upgrademessage = _('repository will be more resilient to storing '
223 'certain paths and performance of certain '
222 'certain paths and performance of certain '
224 'operations should be improved')
223 'operations should be improved')
225
224
226 @registerformatvariant
225 @registerformatvariant
227 class dotencode(requirementformatvariant):
226 class dotencode(requirementformatvariant):
228 name = 'dotencode'
227 name = 'dotencode'
229
228
230 _requirement = 'dotencode'
229 _requirement = 'dotencode'
231
230
232 default = True
231 default = True
233
232
234 description = _('storage of filenames beginning with a period or '
233 description = _('storage of filenames beginning with a period or '
235 'space may not work correctly')
234 'space may not work correctly')
236
235
237 upgrademessage = _('repository will be better able to store files '
236 upgrademessage = _('repository will be better able to store files '
238 'beginning with a space or period')
237 'beginning with a space or period')
239
238
240 @registerformatvariant
239 @registerformatvariant
241 class generaldelta(requirementformatvariant):
240 class generaldelta(requirementformatvariant):
242 name = 'generaldelta'
241 name = 'generaldelta'
243
242
244 _requirement = 'generaldelta'
243 _requirement = 'generaldelta'
245
244
246 default = True
245 default = True
247
246
248 description = _('deltas within internal storage are unable to '
247 description = _('deltas within internal storage are unable to '
249 'choose optimal revisions; repository is larger and '
248 'choose optimal revisions; repository is larger and '
250 'slower than it could be; interaction with other '
249 'slower than it could be; interaction with other '
251 'repositories may require extra network and CPU '
250 'repositories may require extra network and CPU '
252 'resources, making "hg push" and "hg pull" slower')
251 'resources, making "hg push" and "hg pull" slower')
253
252
254 upgrademessage = _('repository storage will be able to create '
253 upgrademessage = _('repository storage will be able to create '
255 'optimal deltas; new repository data will be '
254 'optimal deltas; new repository data will be '
256 'smaller and read times should decrease; '
255 'smaller and read times should decrease; '
257 'interacting with other repositories using this '
256 'interacting with other repositories using this '
258 'storage model should require less network and '
257 'storage model should require less network and '
259 'CPU resources, making "hg push" and "hg pull" '
258 'CPU resources, making "hg push" and "hg pull" '
260 'faster')
259 'faster')
261
260
262 @registerformatvariant
261 @registerformatvariant
263 class removecldeltachain(formatvariant):
262 class removecldeltachain(formatvariant):
264 name = 'plain-cl-delta'
263 name = 'plain-cl-delta'
265
264
266 default = True
265 default = True
267
266
268 description = _('changelog storage is using deltas instead of '
267 description = _('changelog storage is using deltas instead of '
269 'raw entries; changelog reading and any '
268 'raw entries; changelog reading and any '
270 'operation relying on changelog data are slower '
269 'operation relying on changelog data are slower '
271 'than they could be')
270 'than they could be')
272
271
273 upgrademessage = _('changelog storage will be reformated to '
272 upgrademessage = _('changelog storage will be reformated to '
274 'store raw entries; changelog reading will be '
273 'store raw entries; changelog reading will be '
275 'faster; changelog size may be reduced')
274 'faster; changelog size may be reduced')
276
275
277 @staticmethod
276 @staticmethod
278 def fromrepo(repo):
277 def fromrepo(repo):
279 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
278 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
280 # changelogs with deltas.
279 # changelogs with deltas.
281 cl = repo.changelog
280 cl = repo.changelog
282 chainbase = cl.chainbase
281 chainbase = cl.chainbase
283 return all(rev == chainbase(rev) for rev in cl)
282 return all(rev == chainbase(rev) for rev in cl)
284
283
285 @staticmethod
284 @staticmethod
286 def fromconfig(repo):
285 def fromconfig(repo):
287 return True
286 return True
288
287
289 @registerformatvariant
288 @registerformatvariant
290 class compressionengine(formatvariant):
289 class compressionengine(formatvariant):
291 name = 'compression'
290 name = 'compression'
292 default = 'zlib'
291 default = 'zlib'
293
292
294 description = _('Compresion algorithm used to compress data. '
293 description = _('Compresion algorithm used to compress data. '
295 'Some engine are faster than other')
294 'Some engine are faster than other')
296
295
297 upgrademessage = _('revlog content will be recompressed with the new '
296 upgrademessage = _('revlog content will be recompressed with the new '
298 'algorithm.')
297 'algorithm.')
299
298
300 @classmethod
299 @classmethod
301 def fromrepo(cls, repo):
300 def fromrepo(cls, repo):
302 for req in repo.requirements:
301 for req in repo.requirements:
303 if req.startswith('exp-compression-'):
302 if req.startswith('exp-compression-'):
304 return req.split('-', 2)[2]
303 return req.split('-', 2)[2]
305 return 'zlib'
304 return 'zlib'
306
305
307 @classmethod
306 @classmethod
308 def fromconfig(cls, repo):
307 def fromconfig(cls, repo):
309 return repo.ui.config('experimental', 'format.compression')
308 return repo.ui.config('experimental', 'format.compression')
310
309
311 def finddeficiencies(repo):
310 def finddeficiencies(repo):
312 """returns a list of deficiencies that the repo suffer from"""
311 """returns a list of deficiencies that the repo suffer from"""
313 deficiencies = []
312 deficiencies = []
314
313
315 # We could detect lack of revlogv1 and store here, but they were added
314 # We could detect lack of revlogv1 and store here, but they were added
316 # in 0.9.2 and we don't support upgrading repos without these
315 # in 0.9.2 and we don't support upgrading repos without these
317 # requirements, so let's not bother.
316 # requirements, so let's not bother.
318
317
319 for fv in allformatvariant:
318 for fv in allformatvariant:
320 if not fv.fromrepo(repo):
319 if not fv.fromrepo(repo):
321 deficiencies.append(fv)
320 deficiencies.append(fv)
322
321
323 return deficiencies
322 return deficiencies
324
323
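
A hypothetical caller-side sketch: every registered format variant votes via
fromrepo(), and the falsy ones come back as deficiencies.

    # 'repo' stands for an open local repository object
    for fv in finddeficiencies(repo):
        print('%s: %s' % (fv.name, fv.description))
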
325 def findoptimizations(repo):
324 def findoptimizations(repo):
326 """Determine optimisation that could be used during upgrade"""
325 """Determine optimisation that could be used during upgrade"""
327 # These are unconditionally added. There is logic later that figures out
326 # These are unconditionally added. There is logic later that figures out
328 # which ones to apply.
327 # which ones to apply.
329 optimizations = []
328 optimizations = []
330
329
331 optimizations.append(improvement(
330 optimizations.append(improvement(
332 name='redeltaparent',
331 name='redeltaparent',
333 type=optimisation,
332 type=optimisation,
334 description=_('deltas within internal storage will be recalculated to '
333 description=_('deltas within internal storage will be recalculated to '
335 'choose an optimal base revision where this was not '
334 'choose an optimal base revision where this was not '
336 'already done; the size of the repository may shrink and '
335 'already done; the size of the repository may shrink and '
337 'various operations may become faster; the first time '
336 'various operations may become faster; the first time '
338 'this optimization is performed could slow down upgrade '
337 'this optimization is performed could slow down upgrade '
339 'execution considerably; subsequent invocations should '
338 'execution considerably; subsequent invocations should '
340 'not run noticeably slower'),
339 'not run noticeably slower'),
341 upgrademessage=_('deltas within internal storage will choose a new '
340 upgrademessage=_('deltas within internal storage will choose a new '
342 'base revision if needed')))
341 'base revision if needed')))
343
342
344 optimizations.append(improvement(
343 optimizations.append(improvement(
345 name='redeltamultibase',
344 name='redeltamultibase',
346 type=optimisation,
345 type=optimisation,
347 description=_('deltas within internal storage will be recalculated '
346 description=_('deltas within internal storage will be recalculated '
348 'against multiple base revisions and the smallest '
347 'against multiple base revisions and the smallest '
349 'difference will be used; the size of the repository may '
348 'difference will be used; the size of the repository may '
350 'shrink significantly when there are many merges; this '
349 'shrink significantly when there are many merges; this '
351 'optimization will slow down execution in proportion to '
350 'optimization will slow down execution in proportion to '
352 'the number of merges in the repository and the number '
351 'the number of merges in the repository and the number '
353 'of files in the repository; this slowdown should not '
352 'of files in the repository; this slowdown should not '
354 'be significant unless there are tens of thousands of '
353 'be significant unless there are tens of thousands of '
355 'files and thousands of merges'),
354 'files and thousands of merges'),
356 upgrademessage=_('deltas within internal storage will choose an '
355 upgrademessage=_('deltas within internal storage will choose an '
357 'optimal delta by computing deltas against multiple '
356 'optimal delta by computing deltas against multiple '
358 'parents; may slow down execution time '
357 'parents; may slow down execution time '
359 'significantly')))
358 'significantly')))
360
359
361 optimizations.append(improvement(
360 optimizations.append(improvement(
362 name='redeltaall',
361 name='redeltaall',
363 type=optimisation,
362 type=optimisation,
364 description=_('deltas within internal storage will always be '
363 description=_('deltas within internal storage will always be '
365 'recalculated without reusing prior deltas; this will '
364 'recalculated without reusing prior deltas; this will '
366 'likely make execution run several times slower; this '
365 'likely make execution run several times slower; this '
367 'optimization is typically not needed'),
366 'optimization is typically not needed'),
368 upgrademessage=_('deltas within internal storage will be fully '
367 upgrademessage=_('deltas within internal storage will be fully '
369 'recomputed; this will likely drastically slow down '
368 'recomputed; this will likely drastically slow down '
370 'execution time')))
369 'execution time')))
371
370
372 optimizations.append(improvement(
371 optimizations.append(improvement(
373 name='redeltafulladd',
372 name='redeltafulladd',
374 type=optimisation,
373 type=optimisation,
375 description=_('every revision will be re-added as if it were new '
374 description=_('every revision will be re-added as if it were new '
376 'content. It will go through the full storage '
375 'content. It will go through the full storage '
377 'mechanism giving extensions a chance to process it '
376 'mechanism giving extensions a chance to process it '
378 '(e.g. lfs). This is similar to "redeltaall" but even '
377 '(e.g. lfs). This is similar to "redeltaall" but even '
379 'slower since more logic is involved.'),
378 'slower since more logic is involved.'),
380 upgrademessage=_('each revision will be added as new content to the '
379 upgrademessage=_('each revision will be added as new content to the '
381 'internal storage; this will likely drastically slow '
380 'internal storage; this will likely drastically slow '
382 'down execution time, but some extensions might need '
381 'down execution time, but some extensions might need '
383 'it')))
382 'it')))
384
383
385 return optimizations
384 return optimizations
386
385
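
Each improvement above is addressable by name through the --optimize flag of
the upgrade command that wraps this module. A sketch of selecting a subset by
name, mirroring the filtering done later in upgraderepo():

    requested = {'redeltaparent', 'redeltamultibase'}  # example names
    selected = [o for o in findoptimizations(repo) if o.name in requested]
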
387 def determineactions(repo, deficiencies, sourcereqs, destreqs):
386 def determineactions(repo, deficiencies, sourcereqs, destreqs):
388 """Determine upgrade actions that will be performed.
387 """Determine upgrade actions that will be performed.
389
388
390 Given a list of improvements as returned by ``finddeficiencies`` and
389 Given a list of improvements as returned by ``finddeficiencies`` and
391 ``findoptimizations``, determine the list of upgrade actions that
390 ``findoptimizations``, determine the list of upgrade actions that
392 will be performed.
391 will be performed.
393
392
394 The role of this function is to filter improvements if needed, apply
393 The role of this function is to filter improvements if needed, apply
395 recommended optimizations from the improvements list that make sense,
394 recommended optimizations from the improvements list that make sense,
396 etc.
395 etc.
397
396
398 Returns a list of action names.
397 Returns a list of action names.
399 """
398 """
400 newactions = []
399 newactions = []
401
400
402 knownreqs = supporteddestrequirements(repo)
401 knownreqs = supporteddestrequirements(repo)
403
402
404 for d in deficiencies:
403 for d in deficiencies:
405 name = d.name
404 name = d.name
406
405
407 # If the action is a requirement that doesn't show up in the
406 # If the action is a requirement that doesn't show up in the
408 # destination requirements, prune the action.
407 # destination requirements, prune the action.
409 if name in knownreqs and name not in destreqs:
408 if name in knownreqs and name not in destreqs:
410 continue
409 continue
411
410
412 newactions.append(d)
411 newactions.append(d)
413
412
414 # FUTURE consider adding some optimizations here for certain transitions.
413 # FUTURE consider adding some optimizations here for certain transitions.
415 # e.g. adding generaldelta could schedule parent redeltas.
414 # e.g. adding generaldelta could schedule parent redeltas.
416
415
417 return newactions
416 return newactions
418
417
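
The pruning rule above only fires for deficiencies whose name doubles as a
requirement: if the destination will not carry that requirement, scheduling
the action would be pointless. A toy illustration with made-up sets:

    knownreqs = {'generaldelta', 'dotencode'}  # supported destination reqs
    destreqs = {'dotencode'}                   # what the new repo will get
    name = 'generaldelta'
    # a known requirement absent from the destination -> action is pruned
    assert name in knownreqs and name not in destreqs
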
419 def _revlogfrompath(repo, path):
418 def _revlogfrompath(repo, path):
420 """Obtain a revlog from a repo path.
419 """Obtain a revlog from a repo path.
421
420
422 An instance of the appropriate class is returned.
421 An instance of the appropriate class is returned.
423 """
422 """
424 if path == '00changelog.i':
423 if path == '00changelog.i':
425 return changelog.changelog(repo.svfs)
424 return changelog.changelog(repo.svfs)
426 elif path.endswith('00manifest.i'):
425 elif path.endswith('00manifest.i'):
427 mandir = path[:-len('00manifest.i')]
426 mandir = path[:-len('00manifest.i')]
428 return manifest.manifestrevlog(repo.svfs, dir=mandir)
427 return manifest.manifestrevlog(repo.svfs, dir=mandir)
429 else:
428 else:
430 # reverse of "/".join(("data", path + ".i"))
429 # reverse of "/".join(("data", path + ".i"))
431 return filelog.filelog(repo.svfs, path[5:-2])
430 return filelog.filelog(repo.svfs, path[5:-2])
432
431
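
For illustration, typical store paths map as follows under _revlogfrompath()
(the tree-manifest entry is a hypothetical example):

    '00changelog.i'          -> changelog.changelog(repo.svfs)
    '00manifest.i'           -> manifest.manifestrevlog(repo.svfs, dir='')
    'meta/foo/00manifest.i'  -> manifest.manifestrevlog(repo.svfs, dir='meta/foo/')
    'data/foo.py.i'          -> filelog.filelog(repo.svfs, 'foo.py')
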
433 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
434 """Copy revlogs between 2 repos."""
433 """Copy revlogs between 2 repos."""
435 revcount = 0
434 revcount = 0
436 srcsize = 0
435 srcsize = 0
437 srcrawsize = 0
436 srcrawsize = 0
438 dstsize = 0
437 dstsize = 0
439 fcount = 0
438 fcount = 0
440 frevcount = 0
439 frevcount = 0
441 fsrcsize = 0
440 fsrcsize = 0
442 frawsize = 0
441 frawsize = 0
443 fdstsize = 0
442 fdstsize = 0
444 mcount = 0
443 mcount = 0
445 mrevcount = 0
444 mrevcount = 0
446 msrcsize = 0
445 msrcsize = 0
447 mrawsize = 0
446 mrawsize = 0
448 mdstsize = 0
447 mdstsize = 0
449 crevcount = 0
448 crevcount = 0
450 csrcsize = 0
449 csrcsize = 0
451 crawsize = 0
450 crawsize = 0
452 cdstsize = 0
451 cdstsize = 0
453
452
454 # Perform a pass to collect metadata. This validates we can open all
453 # Perform a pass to collect metadata. This validates we can open all
455 # source files and allows a unified progress bar to be displayed.
454 # source files and allows a unified progress bar to be displayed.
456 for unencoded, encoded, size in srcrepo.store.walk():
455 for unencoded, encoded, size in srcrepo.store.walk():
457 if unencoded.endswith('.d'):
456 if unencoded.endswith('.d'):
458 continue
457 continue
459
458
460 rl = _revlogfrompath(srcrepo, unencoded)
459 rl = _revlogfrompath(srcrepo, unencoded)
461 revcount += len(rl)
460 revcount += len(rl)
462
461
463 datasize = 0
462 datasize = 0
464 rawsize = 0
463 rawsize = 0
465 idx = rl.index
464 idx = rl.index
466 for rev in rl:
465 for rev in rl:
467 e = idx[rev]
466 e = idx[rev]
468 datasize += e[1]
467 datasize += e[1]
469 rawsize += e[2]
468 rawsize += e[2]
470
469
471 srcsize += datasize
470 srcsize += datasize
472 srcrawsize += rawsize
471 srcrawsize += rawsize
473
472
474 # This is for the separate progress bars.
473 # This is for the separate progress bars.
475 if isinstance(rl, changelog.changelog):
474 if isinstance(rl, changelog.changelog):
476 crevcount += len(rl)
475 crevcount += len(rl)
477 csrcsize += datasize
476 csrcsize += datasize
478 crawsize += rawsize
477 crawsize += rawsize
479 elif isinstance(rl, manifest.manifestrevlog):
478 elif isinstance(rl, manifest.manifestrevlog):
480 mcount += 1
479 mcount += 1
481 mrevcount += len(rl)
480 mrevcount += len(rl)
482 msrcsize += datasize
481 msrcsize += datasize
483 mrawsize += rawsize
482 mrawsize += rawsize
484 elif isinstance(rl, revlog.revlog):
483 elif isinstance(rl, revlog.revlog):
485 fcount += 1
484 fcount += 1
486 frevcount += len(rl)
485 frevcount += len(rl)
487 fsrcsize += datasize
486 fsrcsize += datasize
488 frawsize += rawsize
487 frawsize += rawsize
489
488
490 if not revcount:
489 if not revcount:
491 return
490 return
492
491
493 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
492 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
494 '%d in changelog)\n') %
493 '%d in changelog)\n') %
495 (revcount, frevcount, mrevcount, crevcount))
494 (revcount, frevcount, mrevcount, crevcount))
496 ui.write(_('migrating %s in store; %s tracked data\n') % (
495 ui.write(_('migrating %s in store; %s tracked data\n') % (
497 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
496 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
498
497
499 # Used to keep track of progress.
498 # Used to keep track of progress.
500 progress = []
499 progress = []
501 def oncopiedrevision(rl, rev, node):
500 def oncopiedrevision(rl, rev, node):
502 progress[1] += 1
501 progress[1] += 1
503 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
502 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
504
503
505 # Do the actual copying.
504 # Do the actual copying.
506 # FUTURE this operation can be farmed off to worker processes.
505 # FUTURE this operation can be farmed off to worker processes.
507 seen = set()
506 seen = set()
508 for unencoded, encoded, size in srcrepo.store.walk():
507 for unencoded, encoded, size in srcrepo.store.walk():
509 if unencoded.endswith('.d'):
508 if unencoded.endswith('.d'):
510 continue
509 continue
511
510
512 oldrl = _revlogfrompath(srcrepo, unencoded)
511 oldrl = _revlogfrompath(srcrepo, unencoded)
513 newrl = _revlogfrompath(dstrepo, unencoded)
512 newrl = _revlogfrompath(dstrepo, unencoded)
514
513
515 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
514 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
516 ui.write(_('finished migrating %d manifest revisions across %d '
515 ui.write(_('finished migrating %d manifest revisions across %d '
517 'manifests; change in size: %s\n') %
516 'manifests; change in size: %s\n') %
518 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
517 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
519
518
520 ui.write(_('migrating changelog containing %d revisions '
519 ui.write(_('migrating changelog containing %d revisions '
521 '(%s in store; %s tracked data)\n') %
520 '(%s in store; %s tracked data)\n') %
522 (crevcount, util.bytecount(csrcsize),
521 (crevcount, util.bytecount(csrcsize),
523 util.bytecount(crawsize)))
522 util.bytecount(crawsize)))
524 seen.add('c')
523 seen.add('c')
525 progress[:] = [_('changelog revisions'), 0, crevcount]
524 progress[:] = [_('changelog revisions'), 0, crevcount]
526 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
525 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
527 ui.write(_('finished migrating %d filelog revisions across %d '
526 ui.write(_('finished migrating %d filelog revisions across %d '
528 'filelogs; change in size: %s\n') %
527 'filelogs; change in size: %s\n') %
529 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
528 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
530
529
531 ui.write(_('migrating %d manifests containing %d revisions '
530 ui.write(_('migrating %d manifests containing %d revisions '
532 '(%s in store; %s tracked data)\n') %
531 '(%s in store; %s tracked data)\n') %
533 (mcount, mrevcount, util.bytecount(msrcsize),
532 (mcount, mrevcount, util.bytecount(msrcsize),
534 util.bytecount(mrawsize)))
533 util.bytecount(mrawsize)))
535 seen.add('m')
534 seen.add('m')
536 progress[:] = [_('manifest revisions'), 0, mrevcount]
535 progress[:] = [_('manifest revisions'), 0, mrevcount]
537 elif 'f' not in seen:
536 elif 'f' not in seen:
538 ui.write(_('migrating %d filelogs containing %d revisions '
537 ui.write(_('migrating %d filelogs containing %d revisions '
539 '(%s in store; %s tracked data)\n') %
538 '(%s in store; %s tracked data)\n') %
540 (fcount, frevcount, util.bytecount(fsrcsize),
539 (fcount, frevcount, util.bytecount(fsrcsize),
541 util.bytecount(frawsize)))
540 util.bytecount(frawsize)))
542 seen.add('f')
541 seen.add('f')
543 progress[:] = [_('file revisions'), 0, frevcount]
542 progress[:] = [_('file revisions'), 0, frevcount]
544
543
545 ui.progress(progress[0], progress[1], total=progress[2])
544 ui.progress(progress[0], progress[1], total=progress[2])
546
545
547 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
546 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
548 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
547 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
549 deltareuse=deltareuse,
548 deltareuse=deltareuse,
550 aggressivemergedeltas=aggressivemergedeltas)
549 aggressivemergedeltas=aggressivemergedeltas)
551
550
552 datasize = 0
551 datasize = 0
553 idx = newrl.index
552 idx = newrl.index
554 for rev in newrl:
553 for rev in newrl:
555 datasize += idx[rev][1]
554 datasize += idx[rev][1]
556
555
557 dstsize += datasize
556 dstsize += datasize
558
557
559 if isinstance(newrl, changelog.changelog):
558 if isinstance(newrl, changelog.changelog):
560 cdstsize += datasize
559 cdstsize += datasize
561 elif isinstance(newrl, manifest.manifestrevlog):
560 elif isinstance(newrl, manifest.manifestrevlog):
562 mdstsize += datasize
561 mdstsize += datasize
563 else:
562 else:
564 fdstsize += datasize
563 fdstsize += datasize
565
564
566 ui.progress(progress[0], None)
565 ui.progress(progress[0], None)
567
566
568 ui.write(_('finished migrating %d changelog revisions; change in size: '
567 ui.write(_('finished migrating %d changelog revisions; change in size: '
569 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
568 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
570
569
571 ui.write(_('finished migrating %d total revisions; total change in store '
570 ui.write(_('finished migrating %d total revisions; total change in store '
572 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
571 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
573
572
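
The sizing passes in _copyrevlogs() read revlog index tuples directly: for an
index entry e, e[1] is the compressed length on disk and e[2] is the
uncompressed ("raw") length. Equivalent one-liners for a single revlog rl
(a sketch, not a drop-in):

    datasize = sum(rl.index[rev][1] for rev in rl)  # bytes in the store
    rawsize = sum(rl.index[rev][2] for rev in rl)   # tracked data bytes
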
574 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
573 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
575 """Determine whether to copy a store file during upgrade.
574 """Determine whether to copy a store file during upgrade.
576
575
577 This function is called when migrating store files from ``srcrepo`` to
576 This function is called when migrating store files from ``srcrepo`` to
578 ``dstrepo`` as part of upgrading a repository.
577 ``dstrepo`` as part of upgrading a repository.
579
578
580 Args:
579 Args:
581 srcrepo: repo we are copying from
580 srcrepo: repo we are copying from
582 dstrepo: repo we are copying to
581 dstrepo: repo we are copying to
583 requirements: set of requirements for ``dstrepo``
582 requirements: set of requirements for ``dstrepo``
584 path: store file being examined
583 path: store file being examined
585 mode: the ``ST_MODE`` file type of ``path``
584 mode: the ``ST_MODE`` file type of ``path``
586 st: ``stat`` data structure for ``path``
585 st: ``stat`` data structure for ``path``
587
586
588 Function should return ``True`` if the file is to be copied.
587 Function should return ``True`` if the file is to be copied.
589 """
588 """
590 # Skip revlogs.
589 # Skip revlogs.
591 if path.endswith(('.i', '.d')):
590 if path.endswith(('.i', '.d')):
592 return False
591 return False
593 # Skip transaction related files.
592 # Skip transaction related files.
594 if path.startswith('undo'):
593 if path.startswith('undo'):
595 return False
594 return False
596 # Only copy regular files.
595 # Only copy regular files.
597 if mode != stat.S_IFREG:
596 if mode != stat.S_IFREG:
598 return False
597 return False
599 # Skip other skipped files.
598 # Skip other skipped files.
600 if path in ('lock', 'fncache'):
599 if path in ('lock', 'fncache'):
601 return False
600 return False
602
601
603 return True
602 return True
604
603
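
Under the rules in _filterstorefile(), typical store entries would filter as
follows (a sketch; the mode and stat arguments are elided):

    'data/foo.py.i'    -> False  # revlog; migrated by _copyrevlogs instead
    'undo.backupfiles' -> False  # transaction residue
    'lock'             -> False  # never copied
    'fncache'          -> False  # regenerated for the destination store
    'phaseroots'       -> True   # ordinary metadata file, copied verbatim
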
605 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
604 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
606 """Hook point for extensions to perform additional actions during upgrade.
605 """Hook point for extensions to perform additional actions during upgrade.
607
606
608 This function is called after revlogs and store files have been copied but
607 This function is called after revlogs and store files have been copied but
609 before the new store is swapped into the original location.
608 before the new store is swapped into the original location.
610 """
609 """
611
610
612 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
611 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
613 """Do the low-level work of upgrading a repository.
612 """Do the low-level work of upgrading a repository.
614
613
615 The upgrade is effectively performed as a copy between a source
614 The upgrade is effectively performed as a copy between a source
616 repository and a temporary destination repository.
615 repository and a temporary destination repository.
617
616
618 The source repository is unmodified for as long as possible so the
617 The source repository is unmodified for as long as possible so the
619 upgrade can abort at any time without causing loss of service for
618 upgrade can abort at any time without causing loss of service for
620 readers and without corrupting the source repository.
619 readers and without corrupting the source repository.
621 """
620 """
622 assert srcrepo.currentwlock()
621 assert srcrepo.currentwlock()
623 assert dstrepo.currentwlock()
622 assert dstrepo.currentwlock()
624
623
625 ui.write(_('(it is safe to interrupt this process any time before '
624 ui.write(_('(it is safe to interrupt this process any time before '
626 'data migration completes)\n'))
625 'data migration completes)\n'))
627
626
628 if 'redeltaall' in actions:
627 if 'redeltaall' in actions:
629 deltareuse = revlog.revlog.DELTAREUSENEVER
628 deltareuse = revlog.revlog.DELTAREUSENEVER
630 elif 'redeltaparent' in actions:
629 elif 'redeltaparent' in actions:
631 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
630 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
632 elif 'redeltamultibase' in actions:
631 elif 'redeltamultibase' in actions:
633 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
632 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
634 elif 'redeltafulladd' in actions:
633 elif 'redeltafulladd' in actions:
635 deltareuse = revlog.revlog.DELTAREUSEFULLADD
634 deltareuse = revlog.revlog.DELTAREUSEFULLADD
636 else:
635 else:
637 deltareuse = revlog.revlog.DELTAREUSEALWAYS
636 deltareuse = revlog.revlog.DELTAREUSEALWAYS
638
637
639 with dstrepo.transaction('upgrade') as tr:
638 with dstrepo.transaction('upgrade') as tr:
640 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
639 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
641 'redeltamultibase' in actions)
640 'redeltamultibase' in actions)
642
641
643 # Now copy other files in the store directory.
642 # Now copy other files in the store directory.
644 # The sorted() makes execution deterministic.
643 # The sorted() makes execution deterministic.
645 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
644 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
646 if not _filterstorefile(srcrepo, dstrepo, requirements,
645 if not _filterstorefile(srcrepo, dstrepo, requirements,
647 p, kind, st):
646 p, kind, st):
648 continue
647 continue
649
648
650 srcrepo.ui.write(_('copying %s\n') % p)
649 srcrepo.ui.write(_('copying %s\n') % p)
651 src = srcrepo.store.rawvfs.join(p)
650 src = srcrepo.store.rawvfs.join(p)
652 dst = dstrepo.store.rawvfs.join(p)
651 dst = dstrepo.store.rawvfs.join(p)
653 util.copyfile(src, dst, copystat=True)
652 util.copyfile(src, dst, copystat=True)
654
653
655 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
654 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
656
655
657 ui.write(_('data fully migrated to temporary repository\n'))
656 ui.write(_('data fully migrated to temporary repository\n'))
658
657
659 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
658 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
660 backupvfs = vfsmod.vfs(backuppath)
659 backupvfs = vfsmod.vfs(backuppath)
661
660
662 # Make a backup of requires file first, as it is the first to be modified.
661 # Make a backup of requires file first, as it is the first to be modified.
663 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
662 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
664
663
665 # We install an arbitrary requirement that clients must not support
664 # We install an arbitrary requirement that clients must not support
666 # as a mechanism to lock out new clients during the data swap. This is
665 # as a mechanism to lock out new clients during the data swap. This is
667 # better than allowing a client to continue while the repository is in
666 # better than allowing a client to continue while the repository is in
668 # an inconsistent state.
667 # an inconsistent state.
669 ui.write(_('marking source repository as being upgraded; clients will be '
668 ui.write(_('marking source repository as being upgraded; clients will be '
670 'unable to read from repository\n'))
669 'unable to read from repository\n'))
671 scmutil.writerequires(srcrepo.vfs,
670 scmutil.writerequires(srcrepo.vfs,
672 srcrepo.requirements | {'upgradeinprogress'})
671 srcrepo.requirements | {'upgradeinprogress'})
673
672
674 ui.write(_('starting in-place swap of repository data\n'))
673 ui.write(_('starting in-place swap of repository data\n'))
675 ui.write(_('replaced files will be backed up at %s\n') %
674 ui.write(_('replaced files will be backed up at %s\n') %
676 backuppath)
675 backuppath)
677
676
678 # Now swap in the new store directory. Doing it as a rename should make
677 # Now swap in the new store directory. Doing it as a rename should make
679 # the operation nearly instantaneous and atomic (at least in well-behaved
678 # the operation nearly instantaneous and atomic (at least in well-behaved
680 # environments).
679 # environments).
681 ui.write(_('replacing store...\n'))
680 ui.write(_('replacing store...\n'))
682 tstart = util.timer()
681 tstart = util.timer()
683 util.rename(srcrepo.spath, backupvfs.join('store'))
682 util.rename(srcrepo.spath, backupvfs.join('store'))
684 util.rename(dstrepo.spath, srcrepo.spath)
683 util.rename(dstrepo.spath, srcrepo.spath)
685 elapsed = util.timer() - tstart
684 elapsed = util.timer() - tstart
686 ui.write(_('store replacement complete; repository was inconsistent for '
685 ui.write(_('store replacement complete; repository was inconsistent for '
687 '%0.1fs\n') % elapsed)
686 '%0.1fs\n') % elapsed)
688
687
689 # We first write the requirements file. Any new requirements will lock
688 # We first write the requirements file. Any new requirements will lock
690 # out legacy clients.
689 # out legacy clients.
691 ui.write(_('finalizing requirements file and making repository readable '
690 ui.write(_('finalizing requirements file and making repository readable '
692 'again\n'))
691 'again\n'))
693 scmutil.writerequires(srcrepo.vfs, requirements)
692 scmutil.writerequires(srcrepo.vfs, requirements)
694
693
695 # The lock file from the old store won't be removed because nothing has a
694 # The lock file from the old store won't be removed because nothing has a
696 # reference to its new location. So clean it up manually. Alternatively, we
695 # reference to its new location. So clean it up manually. Alternatively, we
697 # could update srcrepo.svfs and other variables to point to the new
696 # could update srcrepo.svfs and other variables to point to the new
698 # location. This is simpler.
697 # location. This is simpler.
699 backupvfs.unlink('store/lock')
698 backupvfs.unlink('store/lock')
700
699
701 return backuppath
700 return backuppath
702
701
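
The 'upgradeinprogress' sentinel works because requirements are a hard
compatibility gate: a client refuses to open any repository whose requires
file lists an entry it does not recognize. A sketch of that client-side gate
(hypothetical names and message; the real check lives in repository opening
code):

    supported = {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
    missing = requirements - supported
    if missing:  # 'upgradeinprogress' always lands here
        raise error.RequirementError(
            'repository requires features unknown to this client: %s'
            % ', '.join(sorted(missing)))
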
703 def upgraderepo(ui, repo, run=False, optimize=None):
702 def upgraderepo(ui, repo, run=False, optimize=None):
704 """Upgrade a repository in place."""
703 """Upgrade a repository in place."""
705 optimize = set(optimize or [])
704 optimize = set(optimize or [])
706 repo = repo.unfiltered()
705 repo = repo.unfiltered()
707
706
708 # Ensure the repository can be upgraded.
707 # Ensure the repository can be upgraded.
709 missingreqs = requiredsourcerequirements(repo) - repo.requirements
708 missingreqs = requiredsourcerequirements(repo) - repo.requirements
710 if missingreqs:
709 if missingreqs:
711 raise error.Abort(_('cannot upgrade repository; requirement '
710 raise error.Abort(_('cannot upgrade repository; requirement '
712 'missing: %s') % _(', ').join(sorted(missingreqs)))
711 'missing: %s') % _(', ').join(sorted(missingreqs)))
713
712
714 blockedreqs = blocksourcerequirements(repo) & repo.requirements
713 blockedreqs = blocksourcerequirements(repo) & repo.requirements
715 if blockedreqs:
714 if blockedreqs:
716 raise error.Abort(_('cannot upgrade repository; unsupported source '
715 raise error.Abort(_('cannot upgrade repository; unsupported source '
717 'requirement: %s') %
716 'requirement: %s') %
718 _(', ').join(sorted(blockedreqs)))
717 _(', ').join(sorted(blockedreqs)))
719
718
720 # FUTURE there is potentially a need to control the wanted requirements via
719 # FUTURE there is potentially a need to control the wanted requirements via
721 # command arguments or via an extension hook point.
720 # command arguments or via an extension hook point.
722 newreqs = localrepo.newreporequirements(repo)
721 newreqs = localrepo.newreporequirements(repo)
723 newreqs.update(preservedrequirements(repo))
722 newreqs.update(preservedrequirements(repo))
724
723
725 noremovereqs = (repo.requirements - newreqs -
724 noremovereqs = (repo.requirements - newreqs -
726 supportremovedrequirements(repo))
725 supportremovedrequirements(repo))
727 if noremovereqs:
726 if noremovereqs:
728 raise error.Abort(_('cannot upgrade repository; requirement would be '
727 raise error.Abort(_('cannot upgrade repository; requirement would be '
729 'removed: %s') % _(', ').join(sorted(noremovereqs)))
728 'removed: %s') % _(', ').join(sorted(noremovereqs)))
730
729
731 noaddreqs = (newreqs - repo.requirements -
730 noaddreqs = (newreqs - repo.requirements -
732 allowednewrequirements(repo))
731 allowednewrequirements(repo))
733 if noaddreqs:
732 if noaddreqs:
734 raise error.Abort(_('cannot upgrade repository; do not support adding '
733 raise error.Abort(_('cannot upgrade repository; do not support adding '
735 'requirement: %s') %
734 'requirement: %s') %
736 _(', ').join(sorted(noaddreqs)))
735 _(', ').join(sorted(noaddreqs)))
737
736
738 unsupportedreqs = newreqs - supporteddestrequirements(repo)
737 unsupportedreqs = newreqs - supporteddestrequirements(repo)
739 if unsupportedreqs:
738 if unsupportedreqs:
740 raise error.Abort(_('cannot upgrade repository; do not support '
739 raise error.Abort(_('cannot upgrade repository; do not support '
741 'destination requirement: %s') %
740 'destination requirement: %s') %
742 _(', ').join(sorted(unsupportedreqs)))
741 _(', ').join(sorted(unsupportedreqs)))
743
742
744 # Find and validate all improvements that can be made.
743 # Find and validate all improvements that can be made.
745 alloptimizations = findoptimizations(repo)
744 alloptimizations = findoptimizations(repo)
746
745
747 # Apply and validate arguments.
746 # Apply and validate arguments.
748 optimizations = []
747 optimizations = []
749 for o in alloptimizations:
748 for o in alloptimizations:
750 if o.name in optimize:
749 if o.name in optimize:
751 optimizations.append(o)
750 optimizations.append(o)
752 optimize.discard(o.name)
751 optimize.discard(o.name)
753
752
754 if optimize: # anything left is unknown
753 if optimize: # anything left is unknown
755 raise error.Abort(_('unknown optimization action requested: %s') %
754 raise error.Abort(_('unknown optimization action requested: %s') %
756 ', '.join(sorted(optimize)),
755 ', '.join(sorted(optimize)),
757 hint=_('run without arguments to see valid '
756 hint=_('run without arguments to see valid '
758 'optimizations'))
757 'optimizations'))
759
758
760 deficiencies = finddeficiencies(repo)
759 deficiencies = finddeficiencies(repo)
761 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
760 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
762 actions.extend(o for o in sorted(optimizations)
761 actions.extend(o for o in sorted(optimizations)
763 # determineactions could have added optimisations
762 # determineactions could have added optimisations
764 if o not in actions)
763 if o not in actions)
765
764
766 def printrequirements():
765 def printrequirements():
767 ui.write(_('requirements\n'))
766 ui.write(_('requirements\n'))
768 ui.write(_(' preserved: %s\n') %
767 ui.write(_(' preserved: %s\n') %
769 _(', ').join(sorted(newreqs & repo.requirements)))
768 _(', ').join(sorted(newreqs & repo.requirements)))
770
769
771 if repo.requirements - newreqs:
770 if repo.requirements - newreqs:
772 ui.write(_(' removed: %s\n') %
771 ui.write(_(' removed: %s\n') %
773 _(', ').join(sorted(repo.requirements - newreqs)))
772 _(', ').join(sorted(repo.requirements - newreqs)))
774
773
775 if newreqs - repo.requirements:
774 if newreqs - repo.requirements:
776 ui.write(_(' added: %s\n') %
775 ui.write(_(' added: %s\n') %
777 _(', ').join(sorted(newreqs - repo.requirements)))
776 _(', ').join(sorted(newreqs - repo.requirements)))
778
777
779 ui.write('\n')
778 ui.write('\n')
780
779
781 def printupgradeactions():
780 def printupgradeactions():
782 for a in actions:
781 for a in actions:
783 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
782 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
784
783
785 if not run:
784 if not run:
786 fromconfig = []
785 fromconfig = []
787 onlydefault = []
786 onlydefault = []
788
787
789 for d in deficiencies:
788 for d in deficiencies:
790 if d.fromconfig(repo):
789 if d.fromconfig(repo):
791 fromconfig.append(d)
790 fromconfig.append(d)
792 elif d.default:
791 elif d.default:
793 onlydefault.append(d)
792 onlydefault.append(d)
794
793
795 if fromconfig or onlydefault:
794 if fromconfig or onlydefault:
796
795
797 if fromconfig:
796 if fromconfig:
798 ui.write(_('repository lacks features recommended by '
797 ui.write(_('repository lacks features recommended by '
799 'current config options:\n\n'))
798 'current config options:\n\n'))
800 for i in fromconfig:
799 for i in fromconfig:
801 ui.write('%s\n %s\n\n' % (i.name, i.description))
800 ui.write('%s\n %s\n\n' % (i.name, i.description))
802
801
803 if onlydefault:
802 if onlydefault:
804 ui.write(_('repository lacks features used by the default '
803 ui.write(_('repository lacks features used by the default '
805 'config options:\n\n'))
804 'config options:\n\n'))
806 for i in onlydefault:
805 for i in onlydefault:
807 ui.write('%s\n %s\n\n' % (i.name, i.description))
806 ui.write('%s\n %s\n\n' % (i.name, i.description))
808
807
809 ui.write('\n')
808 ui.write('\n')
810 else:
809 else:
811 ui.write(_('(no feature deficiencies found in existing '
810 ui.write(_('(no feature deficiencies found in existing '
812 'repository)\n'))
811 'repository)\n'))
813
812
814 ui.write(_('performing an upgrade with "--run" will make the following '
813 ui.write(_('performing an upgrade with "--run" will make the following '
815 'changes:\n\n'))
814 'changes:\n\n'))
816
815
817 printrequirements()
816 printrequirements()
818 printupgradeactions()
817 printupgradeactions()
819
818
820 unusedoptimize = [i for i in alloptimizations if i not in actions]
819 unusedoptimize = [i for i in alloptimizations if i not in actions]
821
820
822 if unusedoptimize:
821 if unusedoptimize:
823 ui.write(_('additional optimizations are available by specifying '
822 ui.write(_('additional optimizations are available by specifying '
824 '"--optimize <name>":\n\n'))
823 '"--optimize <name>":\n\n'))
825 for i in unusedoptimize:
824 for i in unusedoptimize:
826 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
825 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
827 return
826 return
828
827
829 # Else we're in the run=true case.
828 # Else we're in the run=true case.
830 ui.write(_('upgrade will perform the following actions:\n\n'))
829 ui.write(_('upgrade will perform the following actions:\n\n'))
831 printrequirements()
830 printrequirements()
832 printupgradeactions()
831 printupgradeactions()
833
832
834 upgradeactions = [a.name for a in actions]
833 upgradeactions = [a.name for a in actions]
835
834
836 ui.write(_('beginning upgrade...\n'))
835 ui.write(_('beginning upgrade...\n'))
837 with repo.wlock(), repo.lock():
836 with repo.wlock(), repo.lock():
838 ui.write(_('repository locked and read-only\n'))
837 ui.write(_('repository locked and read-only\n'))
839 # Our strategy for upgrading the repository is to create a new,
838 # Our strategy for upgrading the repository is to create a new,
840 # temporary repository, write data to it, then do a swap of the
839 # temporary repository, write data to it, then do a swap of the
841 # data. There are less heavyweight ways to do this, but it is easier
840 # data. There are less heavyweight ways to do this, but it is easier
842 # to create a new repo object than to instantiate all the components
841 # to create a new repo object than to instantiate all the components
843 # (like the store) separately.
842 # (like the store) separately.
844 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
843 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
845 backuppath = None
844 backuppath = None
846 try:
845 try:
847 ui.write(_('creating temporary repository to stage migrated '
846 ui.write(_('creating temporary repository to stage migrated '
848 'data: %s\n') % tmppath)
847 'data: %s\n') % tmppath)
849
848
850 # clone ui without using ui.copy because repo.ui is protected
849 # clone ui without using ui.copy because repo.ui is protected
851 repoui = repo.ui.__class__(repo.ui)
850 repoui = repo.ui.__class__(repo.ui)
852 dstrepo = hg.repository(repoui, path=tmppath, create=True)
851 dstrepo = hg.repository(repoui, path=tmppath, create=True)
853
852
854 with dstrepo.wlock(), dstrepo.lock():
853 with dstrepo.wlock(), dstrepo.lock():
855 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
854 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
856 upgradeactions)
855 upgradeactions)
857
856
858 finally:
857 finally:
859 ui.write(_('removing temporary repository %s\n') % tmppath)
858 ui.write(_('removing temporary repository %s\n') % tmppath)
860 repo.vfs.rmtree(tmppath, forcibly=True)
859 repo.vfs.rmtree(tmppath, forcibly=True)
861
860
862 if backuppath:
861 if backuppath:
863 ui.warn(_('copy of old repository backed up at %s\n') %
862 ui.warn(_('copy of old repository backed up at %s\n') %
864 backuppath)
863 backuppath)
865 ui.warn(_('the old repository will not be deleted; remove '
864 ui.warn(_('the old repository will not be deleted; remove '
866 'it to free up disk space once the upgraded '
865 'it to free up disk space once the upgraded '
867 'repository is verified\n'))
866 'repository is verified\n'))
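
End to end, the run=False branch above prints a report and the run=True
branch performs the swap; both are reachable through the debug command that
wraps this function. A usage sketch (flag spellings as wired up elsewhere in
Mercurial; shown here only for orientation):

    $ hg debugupgraderepo                                  # report only
    $ hg debugupgraderepo --optimize redeltaparent --run   # perform upgrade
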
@@ -1,489 +1,422 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import binascii
3 import binascii
4 import itertools
4 import itertools
5 import silenttestrunner
5 import silenttestrunner
6 import unittest
6 import unittest
7
7
8 from mercurial import (
8 from mercurial import (
9 manifest as manifestmod,
9 manifest as manifestmod,
10 match as matchmod,
10 match as matchmod,
11 )
11 )
12
12
13 EMTPY_MANIFEST = b''
13 EMTPY_MANIFEST = b''
14 EMTPY_MANIFEST_V2 = b'\0\n'
15
14
16 HASH_1 = b'1' * 40
15 HASH_1 = b'1' * 40
17 BIN_HASH_1 = binascii.unhexlify(HASH_1)
16 BIN_HASH_1 = binascii.unhexlify(HASH_1)
18 HASH_2 = b'f' * 40
17 HASH_2 = b'f' * 40
19 BIN_HASH_2 = binascii.unhexlify(HASH_2)
18 BIN_HASH_2 = binascii.unhexlify(HASH_2)
20 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
19 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
21 BIN_HASH_3 = binascii.unhexlify(HASH_3)
20 BIN_HASH_3 = binascii.unhexlify(HASH_3)
22 A_SHORT_MANIFEST = (
21 A_SHORT_MANIFEST = (
23 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n'
22 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n'
24 b'foo\0%(hash1)s%(flag1)s\n'
23 b'foo\0%(hash1)s%(flag1)s\n'
25 ) % {b'hash1': HASH_1,
24 ) % {b'hash1': HASH_1,
26 b'flag1': b'',
25 b'flag1': b'',
27 b'hash2': HASH_2,
26 b'hash2': HASH_2,
28 b'flag2': b'l',
27 b'flag2': b'l',
29 }
28 }
30
29
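
For orientation, a v1 manifest is a sorted series of lines of the form
path + '\0' + 40 hex node chars + optional flag ('l' for symlink, 'x' for
executable) + '\n'. Expanded, A_SHORT_MANIFEST above reads:

    bar/baz/qux.py\0ffffffffffffffffffffffffffffffffffffffffl
    foo\01111111111111111111111111111111111111111
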
31 # Same data as A_SHORT_MANIFEST
32 A_SHORT_MANIFEST_V2 = (
33 b'\0\n'
34 b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
35 b'\x00foo\0%(flag1)s\n%(hash1)s\n'
36 ) % {b'hash1': BIN_HASH_1,
37 b'flag1': b'',
38 b'hash2': BIN_HASH_2,
39 b'flag2': b'l',
40 }
41
42 # Same data as A_SHORT_MANIFEST
43 A_METADATA_MANIFEST = (
44 b'\0foo\0bar\n'
45 b'\x00bar/baz/qux.py\0%(flag2)s\0foo\0bar\n%(hash2)s\n' # flag and metadata
46 b'\x00foo\0%(flag1)s\0foo\n%(hash1)s\n' # no flag, but metadata
47 ) % {b'hash1': BIN_HASH_1,
48 b'flag1': b'',
49 b'hash2': BIN_HASH_2,
50 b'flag2': b'l',
51 }
52
53 A_STEM_COMPRESSED_MANIFEST = (
54 b'\0\n'
55 b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
56 b'\x04qux/foo.py\0%(flag1)s\n%(hash1)s\n' # simple case of 4 stem chars
57 b'\x0az.py\0%(flag1)s\n%(hash1)s\n' # tricky newline = 10 stem characters
58 b'\x00%(verylongdir)sx/x\0\n%(hash1)s\n'
59 b'\xffx/y\0\n%(hash2)s\n' # more than 255 stem chars
60 ) % {b'hash1': BIN_HASH_1,
61 b'flag1': b'',
62 b'hash2': BIN_HASH_2,
63 b'flag2': b'l',
64 b'verylongdir': 255 * b'x',
65 }
66
67 A_DEEPER_MANIFEST = (
30 A_DEEPER_MANIFEST = (
68 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
31 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
69 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
32 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
70 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
33 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
71 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
34 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
72 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
35 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
73 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
36 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
74 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
37 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
75 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
38 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
76 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
39 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
77 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
40 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
78 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
41 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
79 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
42 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
80 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
43 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
81 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
44 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
82 b'a/green.py\0%(hash1)s%(flag2)s\n'
45 b'a/green.py\0%(hash1)s%(flag2)s\n'
83 b'a/purple.py\0%(hash2)s%(flag1)s\n'
46 b'a/purple.py\0%(hash2)s%(flag1)s\n'
84 b'app.py\0%(hash3)s%(flag1)s\n'
47 b'app.py\0%(hash3)s%(flag1)s\n'
85 b'readme.txt\0%(hash2)s%(flag1)s\n'
48 b'readme.txt\0%(hash2)s%(flag1)s\n'
86 ) % {b'hash1': HASH_1,
49 ) % {b'hash1': HASH_1,
87 b'flag1': b'',
50 b'flag1': b'',
88 b'hash2': HASH_2,
51 b'hash2': HASH_2,
89 b'flag2': b'l',
52 b'flag2': b'l',
90 b'hash3': HASH_3,
53 b'hash3': HASH_3,
91 }
54 }
92
55
93 HUGE_MANIFEST_ENTRIES = 200001
56 HUGE_MANIFEST_ENTRIES = 200001
94
57
95 izip = getattr(itertools, 'izip', zip)
58 izip = getattr(itertools, 'izip', zip)
96 if 'xrange' not in globals():
59 if 'xrange' not in globals():
97 xrange = range
60 xrange = range
98
61
99 A_HUGE_MANIFEST = b''.join(sorted(
62 A_HUGE_MANIFEST = b''.join(sorted(
100 b'file%d\0%s%s\n' % (i, h, f) for i, h, f in
63 b'file%d\0%s%s\n' % (i, h, f) for i, h, f in
101 izip(xrange(200001),
64 izip(xrange(200001),
102 itertools.cycle((HASH_1, HASH_2)),
65 itertools.cycle((HASH_1, HASH_2)),
103 itertools.cycle((b'', b'x', b'l')))))
66 itertools.cycle((b'', b'x', b'l')))))
104
67
105 class basemanifesttests(object):
68 class basemanifesttests(object):
106 def parsemanifest(self, text):
69 def parsemanifest(self, text):
107 raise NotImplementedError('parsemanifest not implemented by test case')
70 raise NotImplementedError('parsemanifest not implemented by test case')
108
71
109 def testEmptyManifest(self):
72 def testEmptyManifest(self):
110 m = self.parsemanifest(EMTPY_MANIFEST)
73 m = self.parsemanifest(EMTPY_MANIFEST)
111 self.assertEqual(0, len(m))
74 self.assertEqual(0, len(m))
112 self.assertEqual([], list(m))
75 self.assertEqual([], list(m))
113
76
114 def testEmptyManifestv2(self):
115 m = self.parsemanifest(EMTPY_MANIFEST_V2)
116 self.assertEqual(0, len(m))
117 self.assertEqual([], list(m))
118
119 def testManifest(self):
77 def testManifest(self):
120 m = self.parsemanifest(A_SHORT_MANIFEST)
78 m = self.parsemanifest(A_SHORT_MANIFEST)
121 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
79 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
122 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
80 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
123 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
81 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
124 self.assertEqual(BIN_HASH_1, m[b'foo'])
82 self.assertEqual(BIN_HASH_1, m[b'foo'])
125 self.assertEqual(b'', m.flags(b'foo'))
83 self.assertEqual(b'', m.flags(b'foo'))
126 with self.assertRaises(KeyError):
84 with self.assertRaises(KeyError):
127 m[b'wat']
85 m[b'wat']
128
86
129 def testParseManifestV2(self):
130 m1 = self.parsemanifest(A_SHORT_MANIFEST)
131 m2 = self.parsemanifest(A_SHORT_MANIFEST_V2)
132 # Should have same content as A_SHORT_MANIFEST
133 self.assertEqual(m1.text(), m2.text())
134
135 def testParseManifestMetadata(self):
136 # Metadata is for future-proofing and should be accepted but ignored
137 m = self.parsemanifest(A_METADATA_MANIFEST)
138 self.assertEqual(A_SHORT_MANIFEST, m.text())
139
140 def testParseManifestStemCompression(self):
141 m = self.parsemanifest(A_STEM_COMPRESSED_MANIFEST)
142 self.assertIn(b'bar/baz/qux.py', m)
143 self.assertIn(b'bar/qux/foo.py', m)
144 self.assertIn(b'bar/qux/foz.py', m)
145 self.assertIn(256 * b'x' + b'/x', m)
146 self.assertIn(256 * b'x' + b'/y', m)
147 self.assertEqual(A_STEM_COMPRESSED_MANIFEST, m.text(usemanifestv2=True))
148
149 def testTextV2(self):
150 m1 = self.parsemanifest(A_SHORT_MANIFEST)
151 v2text = m1.text(usemanifestv2=True)
152 self.assertEqual(A_SHORT_MANIFEST_V2, v2text)
153
154 def testSetItem(self):
87 def testSetItem(self):
155 want = BIN_HASH_1
88 want = BIN_HASH_1
156
89
157 m = self.parsemanifest(EMTPY_MANIFEST)
90 m = self.parsemanifest(EMTPY_MANIFEST)
158 m[b'a'] = want
91 m[b'a'] = want
159 self.assertIn(b'a', m)
92 self.assertIn(b'a', m)
160 self.assertEqual(want, m[b'a'])
93 self.assertEqual(want, m[b'a'])
161 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
94 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
162
95
163 m = self.parsemanifest(A_SHORT_MANIFEST)
96 m = self.parsemanifest(A_SHORT_MANIFEST)
164 m[b'a'] = want
97 m[b'a'] = want
165 self.assertEqual(want, m[b'a'])
98 self.assertEqual(want, m[b'a'])
166 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST,
99 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST,
167 m.text())
100 m.text())
168
101
169 def testSetFlag(self):
102 def testSetFlag(self):
170 want = b'x'
103 want = b'x'
171
104
172 m = self.parsemanifest(EMTPY_MANIFEST)
105 m = self.parsemanifest(EMTPY_MANIFEST)
173 # first add a file; a file-less flag makes no sense
106 # first add a file; a file-less flag makes no sense
174 m[b'a'] = BIN_HASH_1
107 m[b'a'] = BIN_HASH_1
175 m.setflag(b'a', want)
108 m.setflag(b'a', want)
176 self.assertEqual(want, m.flags(b'a'))
109 self.assertEqual(want, m.flags(b'a'))
177 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
110 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
178
111
179 m = self.parsemanifest(A_SHORT_MANIFEST)
112 m = self.parsemanifest(A_SHORT_MANIFEST)
180 # first add a file; a file-less flag makes no sense
113 # first add a file; a file-less flag makes no sense
181 m[b'a'] = BIN_HASH_1
114 m[b'a'] = BIN_HASH_1
182 m.setflag(b'a', want)
115 m.setflag(b'a', want)
183 self.assertEqual(want, m.flags(b'a'))
116 self.assertEqual(want, m.flags(b'a'))
184 self.assertEqual(b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST,
117 self.assertEqual(b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST,
185 m.text())
118 m.text())
186
119
187 def testCopy(self):
120 def testCopy(self):
188 m = self.parsemanifest(A_SHORT_MANIFEST)
121 m = self.parsemanifest(A_SHORT_MANIFEST)
189 m[b'a'] = BIN_HASH_1
122 m[b'a'] = BIN_HASH_1
190 m2 = m.copy()
123 m2 = m.copy()
191 del m
124 del m
192 del m2 # make sure we don't double free() anything
125 del m2 # make sure we don't double free() anything
193
126
194 def testCompaction(self):
127 def testCompaction(self):
195 unhex = binascii.unhexlify
128 unhex = binascii.unhexlify
196 h1, h2 = unhex(HASH_1), unhex(HASH_2)
129 h1, h2 = unhex(HASH_1), unhex(HASH_2)
197 m = self.parsemanifest(A_SHORT_MANIFEST)
130 m = self.parsemanifest(A_SHORT_MANIFEST)
198 m[b'alpha'] = h1
131 m[b'alpha'] = h1
199 m[b'beta'] = h2
132 m[b'beta'] = h2
200 del m[b'foo']
133 del m[b'foo']
201 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
134 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
202 HASH_1, HASH_2, HASH_2)
135 HASH_1, HASH_2, HASH_2)
203 self.assertEqual(want, m.text())
136 self.assertEqual(want, m.text())
204 self.assertEqual(3, len(m))
137 self.assertEqual(3, len(m))
205 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
138 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
206 self.assertEqual(h1, m[b'alpha'])
139 self.assertEqual(h1, m[b'alpha'])
207 self.assertEqual(h2, m[b'bar/baz/qux.py'])
140 self.assertEqual(h2, m[b'bar/baz/qux.py'])
208 self.assertEqual(h2, m[b'beta'])
141 self.assertEqual(h2, m[b'beta'])
209 self.assertEqual(b'', m.flags(b'alpha'))
142 self.assertEqual(b'', m.flags(b'alpha'))
210 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
143 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
211 self.assertEqual(b'', m.flags(b'beta'))
144 self.assertEqual(b'', m.flags(b'beta'))
212 with self.assertRaises(KeyError):
145 with self.assertRaises(KeyError):
213 m[b'foo']
146 m[b'foo']
214
147
215 def testSetGetNodeSuffix(self):
148 def testSetGetNodeSuffix(self):
216 clean = self.parsemanifest(A_SHORT_MANIFEST)
149 clean = self.parsemanifest(A_SHORT_MANIFEST)
217 m = self.parsemanifest(A_SHORT_MANIFEST)
150 m = self.parsemanifest(A_SHORT_MANIFEST)
218 h = m[b'foo']
151 h = m[b'foo']
219 f = m.flags(b'foo')
152 f = m.flags(b'foo')
220 want = h + b'a'
153 want = h + b'a'
221 # Merge code wants to set 21-byte fake hashes at times
154 # Merge code wants to set 21-byte fake hashes at times
222 m[b'foo'] = want
155 m[b'foo'] = want
223 self.assertEqual(want, m[b'foo'])
156 self.assertEqual(want, m[b'foo'])
224 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
157 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
225 (b'foo', BIN_HASH_1 + b'a')],
158 (b'foo', BIN_HASH_1 + b'a')],
226 list(m.items()))
159 list(m.items()))
227 # Sometimes it even tries a 22-byte fake hash, but we can
160 # Sometimes it even tries a 22-byte fake hash, but we can
228 # return 21 and it'll work out
161 # return 21 and it'll work out
229 m[b'foo'] = want + b'+'
162 m[b'foo'] = want + b'+'
230 self.assertEqual(want, m[b'foo'])
163 self.assertEqual(want, m[b'foo'])
231 # make sure the suffix survives a copy
164 # make sure the suffix survives a copy
232 match = matchmod.match(b'', b'', [b're:foo'])
165 match = matchmod.match(b'', b'', [b're:foo'])
233 m2 = m.matches(match)
166 m2 = m.matches(match)
234 self.assertEqual(want, m2[b'foo'])
167 self.assertEqual(want, m2[b'foo'])
235 self.assertEqual(1, len(m2))
168 self.assertEqual(1, len(m2))
236 m2 = m.copy()
169 m2 = m.copy()
237 self.assertEqual(want, m2[b'foo'])
170 self.assertEqual(want, m2[b'foo'])
238 # suffix with iteration
171 # suffix with iteration
239 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
172 self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
240 (b'foo', want)],
173 (b'foo', want)],
241 list(m.items()))
174 list(m.items()))
242
175
243 # shows up in diff
176 # shows up in diff
244 self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
177 self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
245 self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
178 self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
246
179
247 def testMatchException(self):
180 def testMatchException(self):
248 m = self.parsemanifest(A_SHORT_MANIFEST)
181 m = self.parsemanifest(A_SHORT_MANIFEST)
249 match = matchmod.match(b'', b'', [b're:.*'])
182 match = matchmod.match(b'', b'', [b're:.*'])
250 def filt(path):
183 def filt(path):
251 if path == b'foo':
184 if path == b'foo':
252 assert False
185 assert False
253 return True
186 return True
254 match.matchfn = filt
187 match.matchfn = filt
        with self.assertRaises(AssertionError):
            m.matches(match)

    def testRemoveItem(self):
        m = self.parsemanifest(A_SHORT_MANIFEST)
        del m[b'foo']
        with self.assertRaises(KeyError):
            m[b'foo']
        self.assertEqual(1, len(m))
        self.assertEqual(1, len(list(m)))
        # now restore and make sure everything works right
        m[b'foo'] = b'a' * 20
        self.assertEqual(2, len(m))
        self.assertEqual(2, len(list(m)))

    def testManifestDiff(self):
        MISSING = (None, b'')
        addl = b'z-only-in-left\0' + HASH_1 + b'\n'
        addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
        left = self.parsemanifest(
            A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl)
        right = self.parsemanifest(A_SHORT_MANIFEST + addr)
        want = {
            b'foo': ((BIN_HASH_3, b'x'),
                     (BIN_HASH_1, b'')),
            b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
            b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
        }
        self.assertEqual(want, left.diff(right))

        want = {
            b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
            b'foo': (MISSING, (BIN_HASH_3, b'x')),
            b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
        }
        self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))

        want = {
            b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
            b'foo': ((BIN_HASH_3, b'x'), MISSING),
            b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
        }
        self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
        copy = right.copy()
        del copy[b'z-only-in-right']
        del right[b'foo']
        want = {
            b'foo': (MISSING, (BIN_HASH_1, b'')),
            b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
        }
        self.assertEqual(want, right.diff(copy))

        short = self.parsemanifest(A_SHORT_MANIFEST)
        pruned = short.copy()
        del pruned[b'foo']
        want = {
            b'foo': ((BIN_HASH_1, b''), MISSING),
        }
        self.assertEqual(want, short.diff(pruned))
        want = {
            b'foo': (MISSING, (BIN_HASH_1, b'')),
        }
        self.assertEqual(want, pruned.diff(short))
        want = {
            b'bar/baz/qux.py': None,
            b'foo': (MISSING, (BIN_HASH_1, b'')),
        }
        self.assertEqual(want, pruned.diff(short, clean=True))

    def testReversedLines(self):
        backwards = b''.join(
            l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l)
        try:
            self.parsemanifest(backwards)
            self.fail('Should have raised ValueError')
        except ValueError as v:
            self.assertIn('Manifest lines not in sorted order.', str(v))

    def testNoTerminalNewline(self):
        try:
            self.parsemanifest(A_SHORT_MANIFEST + b'wat')
            self.fail('Should have raised ValueError')
        except ValueError as v:
            self.assertIn('Manifest did not end in a newline.', str(v))

    def testNoNewLineAtAll(self):
        try:
            self.parsemanifest(b'wat')
            self.fail('Should have raised ValueError')
        except ValueError as v:
            self.assertIn('Manifest did not end in a newline.', str(v))

    def testHugeManifest(self):
        m = self.parsemanifest(A_HUGE_MANIFEST)
        self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
        self.assertEqual(len(m), len(list(m)))

    def testMatchesMetadata(self):
        '''Tests matches() for a few specific files to make sure that both
        the set of files as well as their flags and nodeids are correct in
        the resulting manifest.'''
        m = self.parsemanifest(A_HUGE_MANIFEST)

        match = matchmod.match(b'/', b'',
                               [b'file1', b'file200', b'file300'], exact=True)
        m2 = m.matches(match)

        w = (b'file1\0%sx\n'
             b'file200\0%sl\n'
             b'file300\0%s\n') % (HASH_2, HASH_1, HASH_1)
        self.assertEqual(w, m2.text())

    def testMatchesNonexistentFile(self):
        '''Tests matches() for a small set of specific files, including one
        nonexistent file, to make sure it only matches against existing files.
        '''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'',
                               [b'a/b/c/bar.txt', b'a/b/d/qux.py',
                                b'readme.txt', b'nonexistent'],
                               exact=True)
        m2 = m.matches(match)

        self.assertEqual(
            [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'],
            m2.keys())

    def testMatchesNonexistentDirectory(self):
        '''Tests matches() for a relpath match on a directory that doesn't
        actually exist.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'', [b'a/f'], default=b'relpath')
        m2 = m.matches(match)

        self.assertEqual([], m2.keys())

    def testMatchesExactLarge(self):
        '''Tests matches() for files matching a large list of exact files.
        '''
        m = self.parsemanifest(A_HUGE_MANIFEST)

        flist = m.keys()[80:300]
        match = matchmod.match(b'/', b'', flist, exact=True)
        m2 = m.matches(match)

        self.assertEqual(flist, m2.keys())

    def testMatchesFull(self):
        '''Tests matches() for what should be a full match.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'', [b''])
        m2 = m.matches(match)

        self.assertEqual(m.keys(), m2.keys())

    def testMatchesDirectory(self):
        '''Tests matches() on a relpath match on a directory, which should
        match against all files within said directory.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'', [b'a/b'], default=b'relpath')
        m2 = m.matches(match)

        self.assertEqual([
            b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
            b'a/b/c/foo.txt',
            b'a/b/d/baz.py', b'a/b/d/qux.py', b'a/b/d/ten.txt', b'a/b/dog.py',
            b'a/b/fish.py'], m2.keys())

    def testMatchesExactPath(self):
        '''Tests matches() on an exact match on a directory, which should
        result in an empty manifest because you can't perform an exact match
        against a directory.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'', [b'a/b'], exact=True)
        m2 = m.matches(match)

        self.assertEqual([], m2.keys())

    def testMatchesCwd(self):
        '''Tests matches() on a relpath match with the current directory ('.')
        when not in the root directory.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'a/b', [b'.'], default=b'relpath')
        m2 = m.matches(match)

        self.assertEqual([
            b'a/b/c/bar.py', b'a/b/c/bar.txt', b'a/b/c/foo.py',
            b'a/b/c/foo.txt', b'a/b/d/baz.py', b'a/b/d/qux.py',
            b'a/b/d/ten.txt', b'a/b/dog.py', b'a/b/fish.py'], m2.keys())

    def testMatchesWithPattern(self):
        '''Tests matches() for files matching a pattern that reside
        deeper than the specified directory.'''
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        match = matchmod.match(b'/', b'', [b'a/b/*/*.txt'])
        m2 = m.matches(match)

        self.assertEqual(
            [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'],
            m2.keys())

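# The matches() tests above all share one shape: build a matcher with
# matchmod.match() and apply it with manifest.matches() to get a filtered
# manifest back. A minimal sketch of that flow (the two entries below are
# illustrative, not fixtures from this module):
#
#     m = manifestmod.manifestdict(
#         b'bar/baz/qux.py\0' + b'b' * 40 + b'l\n' +
#         b'foo\0' + b'a' * 40 + b'\n')
#     match = matchmod.match(b'/', b'', [b'foo', b'nonexistent'], exact=True)
#     m.matches(match).keys()  # -> [b'foo']; missing paths are dropped
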
class testmanifestdict(unittest.TestCase, basemanifesttests):
    def parsemanifest(self, text):
        return manifestmod.manifestdict(text)

class testtreemanifest(unittest.TestCase, basemanifesttests):
    def parsemanifest(self, text):
        return manifestmod.treemanifest(b'', text)

    def testWalkSubtrees(self):
        m = self.parsemanifest(A_DEEPER_MANIFEST)

        dirs = [s._dir for s in m.walksubtrees()]
        self.assertEqual(
            sorted([
                b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']),
            sorted(dirs)
        )

        match = matchmod.match(b'/', b'', [b'path:a/b/'])
        dirs = [s._dir for s in m.walksubtrees(matcher=match)]
        self.assertEqual(
            sorted([b'a/b/', b'a/b/c/', b'a/b/d/']),
            sorted(dirs)
        )

if __name__ == '__main__':
    silenttestrunner.main(__name__)
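
A note for reading the testManifestDiff assertions above: diff() maps each
differing path to a pair of (node, flags) tuples, the receiver's value first
and the argument's second, with (None, b'') standing in for a side where the
file is absent; clean=True additionally reports unchanged paths as None. A
minimal sketch, assuming the same mercurial internals the test module imports:

    from mercurial import manifest as manifestmod

    left = manifestmod.manifestdict(b'foo\0' + b'a' * 40 + b'\n')
    right = manifestmod.manifestdict(b'foo\0' + b'b' * 40 + b'x\n' +
                                     b'new\0' + b'c' * 40 + b'\n')
    d = left.diff(right)
    # each differing path maps to ((self_node, self_flags),
    # (other_node, other_flags)); an absent side reads (None, b'')
    assert sorted(d) == [b'foo', b'new']
    assert d[b'new'][0] == (None, b'')  # 'new' exists only on the right
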
@@ -1,702 +1,697 @@
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > share =
  > EOF

store and revlogv1 are required in source

  $ hg --config format.usestore=false init no-store
  $ hg -R no-store debugupgraderepo
  abort: cannot upgrade repository; requirement missing: store
  [255]

  $ hg init no-revlogv1
  $ cat > no-revlogv1/.hg/requires << EOF
  > dotencode
  > fncache
  > generaldelta
  > store
  > EOF

  $ hg -R no-revlogv1 debugupgraderepo
  abort: cannot upgrade repository; requirement missing: revlogv1
  [255]

Cannot upgrade shared repositories

  $ hg init share-parent
  $ hg -q share share-parent share-child

  $ hg -R share-child debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

-Do not yet support upgrading manifestv2 and treemanifest repos
+Do not yet support upgrading treemanifest repos

-  $ hg --config experimental.manifestv2=true init manifestv2
-  $ hg -R manifestv2 debugupgraderepo
-  abort: cannot upgrade repository; unsupported source requirement: manifestv2
-  [255]
-
  $ hg --config experimental.treemanifest=true init treemanifest
  $ hg -R treemanifest debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: treemanifest
  [255]

-Cannot add manifestv2 or treemanifest requirement during upgrade
+Cannot add treemanifest requirement during upgrade

  $ hg init disallowaddedreq
-  $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
+  $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
-  abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
+  abort: cannot upgrade repository; do not support adding requirement: treemanifest
  [255]

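All three aborts above come down to set arithmetic over the entries in
.hg/requires: a few entries must be present, every other entry in the source
must be understood by the upgrade code, and the target configuration may only
add entries from a small whitelist. A hypothetical sketch of that gate; the
function and set names here are illustrative, not Mercurial's actual upgrade
API:

    class Abort(Exception):
        """Stand-in for the aborts shown in the transcripts above."""

    def checkupgrade(sourcereqs, targetreqs):
        # store and revlogv1 are required in the source
        missing = {'store', 'revlogv1'} - sourcereqs
        if missing:
            raise Abort('requirement missing: ' + ', '.join(sorted(missing)))
        # refuse sources carrying requirements the upgrade cannot process
        supported = {'store', 'revlogv1', 'dotencode', 'fncache',
                     'generaldelta'}
        unsupported = sourcereqs - supported
        if unsupported:
            raise Abort('unsupported source requirement: ' +
                        ', '.join(sorted(unsupported)))
        # only whitelisted requirements may be added during the upgrade
        added = targetreqs - sourcereqs - {'dotencode', 'fncache',
                                           'generaldelta'}
        if added:
            raise Abort('do not support adding requirement: ' +
                        ', '.join(sorted(added)))

    try:
        checkupgrade({'store', 'revlogv1', 'shared'}, set())
    except Abort as e:
        print(e)  # unsupported source requirement: shared
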
An upgrade of a repository created with recommended settings only suggests optimizations

  $ hg init empty
  $ cd empty
  $ hg debugformat
  format-variant repo
  fncache: yes
  dotencode: yes
  generaldelta: yes
  plain-cl-delta: yes
  compression: zlib
  $ hg debugformat --verbose
  format-variant repo config default
  fncache: yes yes yes
  dotencode: yes yes yes
  generaldelta: yes yes yes
  plain-cl-delta: yes yes yes
  compression: zlib zlib zlib
  $ hg debugformat --verbose --config format.usegfncache=no
  format-variant repo config default
  fncache: yes yes yes
  dotencode: yes yes yes
  generaldelta: yes yes yes
  plain-cl-delta: yes yes yes
  compression: zlib zlib zlib
  $ hg debugformat --verbose --config format.usegfncache=no --color=debug
  format-variant repo config default
  [formatvariant.name.uptodate|fncache: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.uptodate|dotencode: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
  $ hg debugformat -Tjson
  [
   {
    "config": true,
    "default": true,
    "name": "fncache",
    "repo": true
   },
   {
    "config": true,
    "default": true,
    "name": "dotencode",
    "repo": true
   },
   {
    "config": true,
    "default": true,
    "name": "generaldelta",
    "repo": true
   },
   {
    "config": true,
    "default": true,
    "name": "plain-cl-delta",
    "repo": true
   },
   {
    "config": "zlib",
    "default": "zlib",
    "name": "compression",
    "repo": "zlib"
   }
  ]
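
The -Tjson form above is the scripting-friendly one: each entry carries the
variant name plus its repo, config, and default values, so finding
repo-versus-config mismatches is a short loop. A minimal sketch, assuming hg
is on PATH and the script runs inside a repository:

    import json
    import subprocess

    out = subprocess.check_output(['hg', 'debugformat', '-Tjson'])
    for variant in json.loads(out):
        # flag variants where the repository disagrees with current config
        if variant['repo'] != variant['config']:
            print('%s: repo=%r config=%r'
                  % (variant['name'], variant['repo'], variant['config']))
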
  $ hg debugupgraderepo
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, store

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
  deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
  deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
  deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed

  redeltafulladd
  every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.


--optimize can be used to add optimizations

  $ hg debugupgrade --optimize redeltaparent
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, store

  redeltaparent
  deltas within internal storage will choose a new base revision if needed

  additional optimizations are available by specifying "--optimize <name>":

  redeltamultibase
  deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
  deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed

  redeltafulladd
  every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.


Various sub-optimal detections work

  $ cat > .hg/requires << EOF
  > revlogv1
  > store
  > EOF

  $ hg debugformat
  format-variant repo
  fncache: no
  dotencode: no
  generaldelta: no
  plain-cl-delta: yes
  compression: zlib
  $ hg debugformat --verbose
  format-variant repo config default
  fncache: no yes yes
  dotencode: no yes yes
  generaldelta: no yes yes
  plain-cl-delta: yes yes yes
  compression: zlib zlib zlib
  $ hg debugformat --verbose --config format.usegeneraldelta=no
  format-variant repo config default
  fncache: no yes yes
  dotencode: no yes yes
  generaldelta: no no yes
  plain-cl-delta: yes yes yes
  compression: zlib zlib zlib
  $ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
  format-variant repo config default
  [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
  [formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
  [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
  $ hg debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
  long and reserved filenames may not work correctly; repository performance is sub-optimal

  dotencode
  storage of filenames beginning with a period or space may not work correctly

  generaldelta
  deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower


  performing an upgrade with "--run" will make the following changes:

  requirements
  preserved: revlogv1, store
  added: dotencode, fncache, generaldelta

  fncache
  repository will be more resilient to storing certain paths and performance of certain operations should be improved

  dotencode
  repository will be better able to store files beginning with a space or period

  generaldelta
  repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
  deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
  deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
  deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed

  redeltafulladd
  every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.


  $ hg --config format.dotencode=false debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
  long and reserved filenames may not work correctly; repository performance is sub-optimal

  generaldelta
  deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower

  repository lacks features used by the default config options:

  dotencode
  storage of filenames beginning with a period or space may not work correctly


  performing an upgrade with "--run" will make the following changes:

  requirements
  preserved: revlogv1, store
  added: fncache, generaldelta

  fncache
  repository will be more resilient to storing certain paths and performance of certain operations should be improved

  generaldelta
  repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
  deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
  deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
  deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed

  redeltafulladd
  every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.


  $ cd ..

Upgrading a repository that is already modern essentially no-ops

  $ hg init modern
  $ hg -R modern debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, generaldelta, revlogv1, store

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

Upgrading a repository to generaldelta works

  $ hg --config format.usegeneraldelta=false init upgradegd
  $ cd upgradegd
  $ touch f0
  $ hg -q commit -A -m initial
  $ touch f1
  $ hg -q commit -A -m 'add f1'
  $ hg -q up -r 0
  $ touch f2
  $ hg -q commit -A -m 'add f2'

  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
  preserved: dotencode, fncache, revlogv1, store
  added: generaldelta

  generaldelta
  repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
  migrating 341 bytes in store; 401 bytes tracked data
  migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
  finished migrating 3 changelog revisions; change in size: 0 bytes
  finished migrating 9 total revisions; total change in store size: 0 bytes
  copying phaseroots
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

Original requirements backed up

  $ cat .hg/upgradebackup.*/requires
  dotencode
  fncache
  revlogv1
  store

generaldelta added to original requirements files

  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  store

store directory has files we expect

  $ ls .hg/store
  00changelog.i
  00manifest.i
  data
  fncache
  phaseroots
  undo
  undo.backupfiles
  undo.phaseroots

manifest should be generaldelta

  $ hg debugrevlog -m | grep flags
  flags : inline, generaldelta

verify should be happy

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  3 files, 3 changesets, 3 total revisions

old store should be backed up

  $ ls .hg/upgradebackup.*/store
  00changelog.i
  00manifest.i
  data
  fncache
  phaseroots
  undo
  undo.backup.fncache
  undo.backupfiles
  undo.phaseroots

  $ cd ..

415 store files with special filenames aren't encoded during copy
410 store files with special filenames aren't encoded during copy
416
411
417 $ hg init store-filenames
412 $ hg init store-filenames
418 $ cd store-filenames
413 $ cd store-filenames
419 $ touch foo
414 $ touch foo
420 $ hg -q commit -A -m initial
415 $ hg -q commit -A -m initial
421 $ touch .hg/store/.XX_special_filename
416 $ touch .hg/store/.XX_special_filename
422
417
423 $ hg debugupgraderepo --run
418 $ hg debugupgraderepo --run
424 upgrade will perform the following actions:
419 upgrade will perform the following actions:
425
420
426 requirements
421 requirements
427 preserved: dotencode, fncache, generaldelta, revlogv1, store
422 preserved: dotencode, fncache, generaldelta, revlogv1, store
428
423
429 beginning upgrade...
424 beginning upgrade...
430 repository locked and read-only
425 repository locked and read-only
431 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
426 creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
432 (it is safe to interrupt this process any time before data migration completes)
427 (it is safe to interrupt this process any time before data migration completes)
433 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
428 migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
434 migrating 109 bytes in store; 107 bytes tracked data
429 migrating 109 bytes in store; 107 bytes tracked data
435 migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
430 migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
436 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
431 finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
437 migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
432 migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
438 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
433 finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
439 migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
434 migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
440 finished migrating 1 changelog revisions; change in size: 0 bytes
435 finished migrating 1 changelog revisions; change in size: 0 bytes
441 finished migrating 3 total revisions; total change in store size: 0 bytes
436 finished migrating 3 total revisions; total change in store size: 0 bytes
442 copying .XX_special_filename
437 copying .XX_special_filename
443 copying phaseroots
438 copying phaseroots
444 data fully migrated to temporary repository
439 data fully migrated to temporary repository
445 marking source repository as being upgraded; clients will be unable to read from repository
440 marking source repository as being upgraded; clients will be unable to read from repository
446 starting in-place swap of repository data
441 starting in-place swap of repository data
447 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
442 replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
448 replacing store...
443 replacing store...
449 store replacement complete; repository was inconsistent for *s (glob)
444 store replacement complete; repository was inconsistent for *s (glob)
450 finalizing requirements file and making repository readable again
445 finalizing requirements file and making repository readable again
451 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
446 removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
452 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
447 copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
453 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
448 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
454 $ hg debugupgraderepo --run --optimize redeltafulladd
449 $ hg debugupgraderepo --run --optimize redeltafulladd
455 upgrade will perform the following actions:
450 upgrade will perform the following actions:
456
451
457 requirements
452 requirements
458 preserved: dotencode, fncache, generaldelta, revlogv1, store
453 preserved: dotencode, fncache, generaldelta, revlogv1, store
459
454
460 redeltafulladd
455 redeltafulladd
461 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
456 each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
462
457
  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
  migrating 109 bytes in store; 107 bytes tracked data
  migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
  finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
  finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
  finished migrating 1 changelog revisions; change in size: 0 bytes
  finished migrating 3 total revisions; total change in store size: 0 bytes
  copying .XX_special_filename
  copying phaseroots
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

  $ cd ..

Check upgrading a large file repository
---------------------------------------

  $ hg init largefilesrepo
  $ cat << EOF >> largefilesrepo/.hg/hgrc
  > [extensions]
  > largefiles =
  > EOF

  $ cd largefilesrepo
  $ touch foo
  $ hg add --large foo
  $ hg -q commit -m initial
  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  largefiles
  revlogv1
  store

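Note that .hg/requires is a plain list of feature names, one per line; clients refuse to touch a repository whose requirements they do not all recognize, which is why the upgrade rewrites this file only as its final step ("finalizing requirements file" below).
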
  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, store

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
  migrating 163 bytes in store; 160 bytes tracked data
  migrating 1 filelogs containing 1 revisions (42 bytes in store; 41 bytes tracked data)
  finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 1 revisions (52 bytes in store; 51 bytes tracked data)
  finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 1 revisions (69 bytes in store; 68 bytes tracked data)
  finished migrating 1 changelog revisions; change in size: 0 bytes
  finished migrating 3 total revisions; total change in store size: 0 bytes
  copying phaseroots
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  largefiles
  revlogv1
  store

  $ cat << EOF >> .hg/hgrc
  > [extensions]
  > lfs =
  > [lfs]
  > threshold = 10
  > EOF
  $ echo '123456789012345' > lfs.bin
  $ hg ci -Am 'lfs.bin'
  adding lfs.bin
  $ grep lfs .hg/requires
  lfs
  $ find .hg/store/lfs -type f
  .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f

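The object path is content-addressed. Assuming (per the git-lfs spec) that the oid is simply the sha256 of the raw file contents, it can be recomputed by hand; this is an illustrative sketch, not part of the original test:

import hashlib
oid = hashlib.sha256(b'123456789012345\n').hexdigest()   # echo appends the newline
print('.hg/store/lfs/objects/%s/%s' % (oid[:2], oid[2:]))

The two-character directory (d0/ here) is just the first two hex digits of that oid.
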
  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, store

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 6 total revisions (2 in filelogs, 2 in manifests, 2 in changelog)
  migrating 417 bytes in store; 467 bytes tracked data
  migrating 2 filelogs containing 2 revisions (168 bytes in store; 182 bytes tracked data)
  finished migrating 2 filelog revisions across 2 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 2 revisions (113 bytes in store; 151 bytes tracked data)
  finished migrating 2 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 2 revisions (136 bytes in store; 134 bytes tracked data)
  finished migrating 2 changelog revisions; change in size: 0 bytes
  finished migrating 6 total revisions; total change in store size: 0 bytes
  copying phaseroots
  copying lfs blob d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/largefilesrepo/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/largefilesrepo/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

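Note the extra "copying lfs blob ..." line: lfs blobs live outside the revlogs, under .hg/store/lfs/objects/, so the upgrade copies them over verbatim rather than running them through the revlog migration machinery.
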
  $ grep lfs .hg/requires
  lfs
  $ find .hg/store/lfs -type f
  .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  2 files, 2 changesets, 2 total revisions
  $ hg debugdata lfs.bin 0
  version https://git-lfs.github.com/spec/v1
  oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
  size 16
  x-is-binary 0

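So the revlog entry is only a pointer in the git-lfs pointer format: "key value" lines, with the oid naming the blob in the local store. A flat illustrative parser (plain Python, not the lfs extension's own code):

text = '''version https://git-lfs.github.com/spec/v1
oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
size 16
x-is-binary 0'''
pointer = dict(line.split(' ', 1) for line in text.splitlines())
assert pointer['oid'].startswith('sha256:')
print(pointer['size'])   # '16', matching the blob on disk
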
  $ cd ..

repository config is taken into account
---------------------------------------

  $ cat << EOF >> $HGRCPATH
  > [format]
  > maxchainlen = 1
  > EOF

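format.maxchainlen bounds how many deltas a revlog revision may stack before a full snapshot must be stored; with a limit of 1, a revision may be a delta against a full text but never a delta against another delta.
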
  $ hg init localconfig
  $ cd localconfig
  $ cat << EOF > file
  > some content
  > with some length
  > to make sure we get a delta
  > after changes
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > very long
  > EOF
  $ hg -q commit -A -m A
  $ echo "new line" >> file
  $ hg -q commit -m B
  $ echo "new line" >> file
  $ hg -q commit -m C

  $ cat << EOF >> .hg/hgrc
  > [format]
  > maxchainlen = 9001
  > EOF
  $ hg config format
  format.maxchainlen=9001
  $ hg debugindex file
     rev    offset  length   delta linkrev nodeid       p1           p2
       0         0      77      -1       0 bcc1d3df78b2 000000000000 000000000000
       1        77      21       0       1 af3e29f7a72e bcc1d3df78b2 000000000000
       2        98      84      -1       2 8daf79c5522b af3e29f7a72e 000000000000

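In the index above, -1 in the delta column marks a revision stored as a full text. Revision 1 is a 21-byte delta on top of revision 0, but revision 2 had to be stored as a full 84-byte text: chaining it onto revision 1 would have produced a chain of length 2, which maxchainlen = 1 forbids. A flat sketch (not Mercurial API) of deriving chain lengths from that column:

deltabase = {0: -1, 1: 0, 2: -1}   # the "delta" column; -1 means full text
chainlen = lambda r: 0 if deltabase[r] == -1 else 1 + chainlen(deltabase[r])
print([chainlen(r) for r in (0, 1, 2)])   # [0, 1, 0] -- every chain <= 1
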
  $ hg debugupgraderepo --run --optimize redeltaall
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, store

  redeltaall
     deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
  migrating 497 bytes in store; 882 bytes tracked data
  migrating 1 filelogs containing 3 revisions (182 bytes in store; 573 bytes tracked data)
  finished migrating 3 filelog revisions across 1 filelogs; change in size: -63 bytes
  migrating 1 manifests containing 3 revisions (141 bytes in store; 138 bytes tracked data)
  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 3 revisions (174 bytes in store; 171 bytes tracked data)
  finished migrating 3 changelog revisions; change in size: 0 bytes
  finished migrating 9 total revisions; total change in store size: -63 bytes
  copying phaseroots
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
  $ hg debugindex file
     rev    offset  length   delta linkrev nodeid       p1           p2
       0         0      77      -1       0 bcc1d3df78b2 000000000000 000000000000
       1        77      21       0       1 af3e29f7a72e bcc1d3df78b2 000000000000
       2        98      21       1       2 8daf79c5522b af3e29f7a72e 000000000000
  $ cd ..

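The re-delta pass is visible in the index: revision 2 shrank from an 84-byte full text to a 21-byte delta against revision 1 (legal now that maxchainlen is 9001), which is exactly the -63 bytes change in store size reported above. The final block raises the global maxchainlen as well, so the low limit set earlier does not leak into later tests.
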
  $ cat << EOF >> $HGRCPATH
  > [format]
  > maxchainlen = 9001
  > EOF