upgrade: introduce the internal code for revlog cloning selection...
marmoute
r42921:0812d9fb default
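
For readers skimming the diff below: this changeset adds three sentinel values (UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG), a frozenset covering all of them (UPGRADE_ALL_REVLOGS), a matchrevlog() helper that classifies a store entry against a filter, and a revlogs= keyword threaded through _clonerevlogs() and _upgraderepo(). The self-contained sketch below restates those helpers rather than importing mercurial.upgrade, so the selection logic can be tried in isolation; the store paths used in the asserts are made-up examples.

    # Restatement of the helpers introduced by this changeset (sketch only).
    UPGRADE_CHANGELOG = object()
    UPGRADE_MANIFEST = object()
    UPGRADE_FILELOG = object()

    UPGRADE_ALL_REVLOGS = frozenset(
        [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG])

    def matchrevlog(revlogfilter, entry):
        """Return True if the unencoded store entry is selected by the filter."""
        if entry.endswith('00changelog.i'):
            return UPGRADE_CHANGELOG in revlogfilter
        elif entry.endswith('00manifest.i'):
            return UPGRADE_MANIFEST in revlogfilter
        return UPGRADE_FILELOG in revlogfilter

    # Only filelogs selected: the changelog and manifest would be copied
    # as-is (see _copyrevlog in the diff) instead of being re-cloned.
    only_filelogs = frozenset([UPGRADE_FILELOG])
    assert matchrevlog(only_filelogs, 'data/foo.txt.i')
    assert not matchrevlog(only_filelogs, '00changelog.i')
    assert matchrevlog(UPGRADE_ALL_REVLOGS, '00manifest.i')
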
@@ -1,1011 +1,1038 @@
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
27 from .utils import (
27 from .utils import (
28 compression,
28 compression,
29 )
29 )
30
30
31 def requiredsourcerequirements(repo):
31 def requiredsourcerequirements(repo):
32 """Obtain requirements required to be present to upgrade a repo.
32 """Obtain requirements required to be present to upgrade a repo.
33
33
34 An upgrade will not be allowed if the repository doesn't have the
34 An upgrade will not be allowed if the repository doesn't have the
35 requirements returned by this function.
35 requirements returned by this function.
36 """
36 """
37 return {
37 return {
38 # Introduced in Mercurial 0.9.2.
38 # Introduced in Mercurial 0.9.2.
39 'revlogv1',
39 'revlogv1',
40 # Introduced in Mercurial 0.9.2.
40 # Introduced in Mercurial 0.9.2.
41 'store',
41 'store',
42 }
42 }
43
43
44 def blocksourcerequirements(repo):
44 def blocksourcerequirements(repo):
45 """Obtain requirements that will prevent an upgrade from occurring.
45 """Obtain requirements that will prevent an upgrade from occurring.
46
46
47 An upgrade cannot be performed if the source repository contains a
47 An upgrade cannot be performed if the source repository contains a
48 requirements in the returned set.
48 requirements in the returned set.
49 """
49 """
50 return {
50 return {
51 # The upgrade code does not yet support these experimental features.
51 # The upgrade code does not yet support these experimental features.
52 # This is an artificial limitation.
52 # This is an artificial limitation.
53 'treemanifest',
53 'treemanifest',
54 # This was a precursor to generaldelta and was never enabled by default.
54 # This was a precursor to generaldelta and was never enabled by default.
55 # It should (hopefully) not exist in the wild.
55 # It should (hopefully) not exist in the wild.
56 'parentdelta',
56 'parentdelta',
57 # Upgrade should operate on the actual store, not the shared link.
57 # Upgrade should operate on the actual store, not the shared link.
58 'shared',
58 'shared',
59 }
59 }
60
60
61 def supportremovedrequirements(repo):
61 def supportremovedrequirements(repo):
62 """Obtain requirements that can be removed during an upgrade.
62 """Obtain requirements that can be removed during an upgrade.
63
63
64 If an upgrade were to create a repository that dropped a requirement,
64 If an upgrade were to create a repository that dropped a requirement,
65 the dropped requirement must appear in the returned set for the upgrade
65 the dropped requirement must appear in the returned set for the upgrade
66 to be allowed.
66 to be allowed.
67 """
67 """
68 supported = {
68 supported = {
69 localrepo.SPARSEREVLOG_REQUIREMENT,
69 localrepo.SPARSEREVLOG_REQUIREMENT,
70 }
70 }
71 for name in compression.compengines:
71 for name in compression.compengines:
72 engine = compression.compengines[name]
72 engine = compression.compengines[name]
73 if engine.available() and engine.revlogheader():
73 if engine.available() and engine.revlogheader():
74 supported.add(b'exp-compression-%s' % name)
74 supported.add(b'exp-compression-%s' % name)
75 if engine.name() == 'zstd':
75 if engine.name() == 'zstd':
76 supported.add(b'revlog-compression-zstd')
76 supported.add(b'revlog-compression-zstd')
77 return supported
77 return supported
78
78
79 def supporteddestrequirements(repo):
79 def supporteddestrequirements(repo):
80 """Obtain requirements that upgrade supports in the destination.
80 """Obtain requirements that upgrade supports in the destination.
81
81
82 If the result of the upgrade would create requirements not in this set,
82 If the result of the upgrade would create requirements not in this set,
83 the upgrade is disallowed.
83 the upgrade is disallowed.
84
84
85 Extensions should monkeypatch this to add their custom requirements.
85 Extensions should monkeypatch this to add their custom requirements.
86 """
86 """
87 supported = {
87 supported = {
88 'dotencode',
88 'dotencode',
89 'fncache',
89 'fncache',
90 'generaldelta',
90 'generaldelta',
91 'revlogv1',
91 'revlogv1',
92 'store',
92 'store',
93 localrepo.SPARSEREVLOG_REQUIREMENT,
93 localrepo.SPARSEREVLOG_REQUIREMENT,
94 }
94 }
95 for name in compression.compengines:
95 for name in compression.compengines:
96 engine = compression.compengines[name]
96 engine = compression.compengines[name]
97 if engine.available() and engine.revlogheader():
97 if engine.available() and engine.revlogheader():
98 supported.add(b'exp-compression-%s' % name)
98 supported.add(b'exp-compression-%s' % name)
99 if engine.name() == 'zstd':
99 if engine.name() == 'zstd':
100 supported.add(b'revlog-compression-zstd')
100 supported.add(b'revlog-compression-zstd')
101 return supported
101 return supported
102
102
103 def allowednewrequirements(repo):
103 def allowednewrequirements(repo):
104 """Obtain requirements that can be added to a repository during upgrade.
104 """Obtain requirements that can be added to a repository during upgrade.
105
105
106 This is used to disallow proposed requirements from being added when
106 This is used to disallow proposed requirements from being added when
107 they weren't present before.
107 they weren't present before.
108
108
109 We use a list of allowed requirement additions instead of a list of known
109 We use a list of allowed requirement additions instead of a list of known
110 bad additions because the whitelist approach is safer and will prevent
110 bad additions because the whitelist approach is safer and will prevent
111 future, unknown requirements from accidentally being added.
111 future, unknown requirements from accidentally being added.
112 """
112 """
113 supported = {
113 supported = {
114 'dotencode',
114 'dotencode',
115 'fncache',
115 'fncache',
116 'generaldelta',
116 'generaldelta',
117 localrepo.SPARSEREVLOG_REQUIREMENT,
117 localrepo.SPARSEREVLOG_REQUIREMENT,
118 }
118 }
119 for name in compression.compengines:
119 for name in compression.compengines:
120 engine = compression.compengines[name]
120 engine = compression.compengines[name]
121 if engine.available() and engine.revlogheader():
121 if engine.available() and engine.revlogheader():
122 supported.add(b'exp-compression-%s' % name)
122 supported.add(b'exp-compression-%s' % name)
123 if engine.name() == 'zstd':
123 if engine.name() == 'zstd':
124 supported.add(b'revlog-compression-zstd')
124 supported.add(b'revlog-compression-zstd')
125 return supported
125 return supported
126
126
127 def preservedrequirements(repo):
127 def preservedrequirements(repo):
128 return set()
128 return set()
129
129
130 deficiency = 'deficiency'
130 deficiency = 'deficiency'
131 optimisation = 'optimization'
131 optimisation = 'optimization'
132
132
133 class improvement(object):
133 class improvement(object):
134 """Represents an improvement that can be made as part of an upgrade.
134 """Represents an improvement that can be made as part of an upgrade.
135
135
136 The following attributes are defined on each instance:
136 The following attributes are defined on each instance:
137
137
138 name
138 name
139 Machine-readable string uniquely identifying this improvement. It
139 Machine-readable string uniquely identifying this improvement. It
140 will be mapped to an action later in the upgrade process.
140 will be mapped to an action later in the upgrade process.
141
141
142 type
142 type
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
144 problem. An optimization is an action (sometimes optional) that
144 problem. An optimization is an action (sometimes optional) that
145 can be taken to further improve the state of the repository.
145 can be taken to further improve the state of the repository.
146
146
147 description
147 description
148 Message intended for humans explaining the improvement in more detail,
148 Message intended for humans explaining the improvement in more detail,
149 including the implications of it. For ``deficiency`` types, should be
149 including the implications of it. For ``deficiency`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
151 worded in the future tense.
151 worded in the future tense.
152
152
153 upgrademessage
153 upgrademessage
154 Message intended for humans explaining what an upgrade addressing this
154 Message intended for humans explaining what an upgrade addressing this
155 issue will do. Should be worded in the future tense.
155 issue will do. Should be worded in the future tense.
156 """
156 """
157 def __init__(self, name, type, description, upgrademessage):
157 def __init__(self, name, type, description, upgrademessage):
158 self.name = name
158 self.name = name
159 self.type = type
159 self.type = type
160 self.description = description
160 self.description = description
161 self.upgrademessage = upgrademessage
161 self.upgrademessage = upgrademessage
162
162
163 def __eq__(self, other):
163 def __eq__(self, other):
164 if not isinstance(other, improvement):
164 if not isinstance(other, improvement):
165 # This is what python tell use to do
165 # This is what python tell use to do
166 return NotImplemented
166 return NotImplemented
167 return self.name == other.name
167 return self.name == other.name
168
168
169 def __ne__(self, other):
169 def __ne__(self, other):
170 return not (self == other)
170 return not (self == other)
171
171
172 def __hash__(self):
172 def __hash__(self):
173 return hash(self.name)
173 return hash(self.name)
174
174
175 allformatvariant = []
175 allformatvariant = []
176
176
177 def registerformatvariant(cls):
177 def registerformatvariant(cls):
178 allformatvariant.append(cls)
178 allformatvariant.append(cls)
179 return cls
179 return cls
180
180
181 class formatvariant(improvement):
181 class formatvariant(improvement):
182 """an improvement subclass dedicated to repository format"""
182 """an improvement subclass dedicated to repository format"""
183 type = deficiency
183 type = deficiency
184 ### The following attributes should be defined for each class:
184 ### The following attributes should be defined for each class:
185
185
186 # machine-readable string uniquely identifying this improvement. it will be
186 # machine-readable string uniquely identifying this improvement. it will be
187 # mapped to an action later in the upgrade process.
187 # mapped to an action later in the upgrade process.
188 name = None
188 name = None
189
189
190 # message intended for humans explaining the improvement in more detail,
190 # message intended for humans explaining the improvement in more detail,
191 # including the implications of it ``deficiency`` types, should be worded
191 # including the implications of it ``deficiency`` types, should be worded
192 # in the present tense.
192 # in the present tense.
193 description = None
193 description = None
194
194
195 # message intended for humans explaining what an upgrade addressing this
195 # message intended for humans explaining what an upgrade addressing this
196 # issue will do. should be worded in the future tense.
196 # issue will do. should be worded in the future tense.
197 upgrademessage = None
197 upgrademessage = None
198
198
199 # value of current Mercurial default for new repository
199 # value of current Mercurial default for new repository
200 default = None
200 default = None
201
201
202 def __init__(self):
202 def __init__(self):
203 raise NotImplementedError()
203 raise NotImplementedError()
204
204
205 @staticmethod
205 @staticmethod
206 def fromrepo(repo):
206 def fromrepo(repo):
207 """current value of the variant in the repository"""
207 """current value of the variant in the repository"""
208 raise NotImplementedError()
208 raise NotImplementedError()
209
209
210 @staticmethod
210 @staticmethod
211 def fromconfig(repo):
211 def fromconfig(repo):
212 """current value of the variant in the configuration"""
212 """current value of the variant in the configuration"""
213 raise NotImplementedError()
213 raise NotImplementedError()
214
214
215 class requirementformatvariant(formatvariant):
215 class requirementformatvariant(formatvariant):
216 """formatvariant based on a 'requirement' name.
216 """formatvariant based on a 'requirement' name.
217
217
218 Many format variant are controlled by a 'requirement'. We define a small
218 Many format variant are controlled by a 'requirement'. We define a small
219 subclass to factor the code.
219 subclass to factor the code.
220 """
220 """
221
221
222 # the requirement that control this format variant
222 # the requirement that control this format variant
223 _requirement = None
223 _requirement = None
224
224
225 @staticmethod
225 @staticmethod
226 def _newreporequirements(ui):
226 def _newreporequirements(ui):
227 return localrepo.newreporequirements(
227 return localrepo.newreporequirements(
228 ui, localrepo.defaultcreateopts(ui))
228 ui, localrepo.defaultcreateopts(ui))
229
229
230 @classmethod
230 @classmethod
231 def fromrepo(cls, repo):
231 def fromrepo(cls, repo):
232 assert cls._requirement is not None
232 assert cls._requirement is not None
233 return cls._requirement in repo.requirements
233 return cls._requirement in repo.requirements
234
234
235 @classmethod
235 @classmethod
236 def fromconfig(cls, repo):
236 def fromconfig(cls, repo):
237 assert cls._requirement is not None
237 assert cls._requirement is not None
238 return cls._requirement in cls._newreporequirements(repo.ui)
238 return cls._requirement in cls._newreporequirements(repo.ui)
239
239
240 @registerformatvariant
240 @registerformatvariant
241 class fncache(requirementformatvariant):
241 class fncache(requirementformatvariant):
242 name = 'fncache'
242 name = 'fncache'
243
243
244 _requirement = 'fncache'
244 _requirement = 'fncache'
245
245
246 default = True
246 default = True
247
247
248 description = _('long and reserved filenames may not work correctly; '
248 description = _('long and reserved filenames may not work correctly; '
249 'repository performance is sub-optimal')
249 'repository performance is sub-optimal')
250
250
251 upgrademessage = _('repository will be more resilient to storing '
251 upgrademessage = _('repository will be more resilient to storing '
252 'certain paths and performance of certain '
252 'certain paths and performance of certain '
253 'operations should be improved')
253 'operations should be improved')
254
254
255 @registerformatvariant
255 @registerformatvariant
256 class dotencode(requirementformatvariant):
256 class dotencode(requirementformatvariant):
257 name = 'dotencode'
257 name = 'dotencode'
258
258
259 _requirement = 'dotencode'
259 _requirement = 'dotencode'
260
260
261 default = True
261 default = True
262
262
263 description = _('storage of filenames beginning with a period or '
263 description = _('storage of filenames beginning with a period or '
264 'space may not work correctly')
264 'space may not work correctly')
265
265
266 upgrademessage = _('repository will be better able to store files '
266 upgrademessage = _('repository will be better able to store files '
267 'beginning with a space or period')
267 'beginning with a space or period')
268
268
269 @registerformatvariant
269 @registerformatvariant
270 class generaldelta(requirementformatvariant):
270 class generaldelta(requirementformatvariant):
271 name = 'generaldelta'
271 name = 'generaldelta'
272
272
273 _requirement = 'generaldelta'
273 _requirement = 'generaldelta'
274
274
275 default = True
275 default = True
276
276
277 description = _('deltas within internal storage are unable to '
277 description = _('deltas within internal storage are unable to '
278 'choose optimal revisions; repository is larger and '
278 'choose optimal revisions; repository is larger and '
279 'slower than it could be; interaction with other '
279 'slower than it could be; interaction with other '
280 'repositories may require extra network and CPU '
280 'repositories may require extra network and CPU '
281 'resources, making "hg push" and "hg pull" slower')
281 'resources, making "hg push" and "hg pull" slower')
282
282
283 upgrademessage = _('repository storage will be able to create '
283 upgrademessage = _('repository storage will be able to create '
284 'optimal deltas; new repository data will be '
284 'optimal deltas; new repository data will be '
285 'smaller and read times should decrease; '
285 'smaller and read times should decrease; '
286 'interacting with other repositories using this '
286 'interacting with other repositories using this '
287 'storage model should require less network and '
287 'storage model should require less network and '
288 'CPU resources, making "hg push" and "hg pull" '
288 'CPU resources, making "hg push" and "hg pull" '
289 'faster')
289 'faster')
290
290
291 @registerformatvariant
291 @registerformatvariant
292 class sparserevlog(requirementformatvariant):
292 class sparserevlog(requirementformatvariant):
293 name = 'sparserevlog'
293 name = 'sparserevlog'
294
294
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
296
296
297 default = True
297 default = True
298
298
299 description = _('in order to limit disk reading and memory usage on older '
299 description = _('in order to limit disk reading and memory usage on older '
300 'version, the span of a delta chain from its root to its '
300 'version, the span of a delta chain from its root to its '
301 'end is limited, whatever the relevant data in this span. '
301 'end is limited, whatever the relevant data in this span. '
302 'This can severly limit Mercurial ability to build good '
302 'This can severly limit Mercurial ability to build good '
303 'chain of delta resulting is much more storage space being '
303 'chain of delta resulting is much more storage space being '
304 'taken and limit reusability of on disk delta during '
304 'taken and limit reusability of on disk delta during '
305 'exchange.'
305 'exchange.'
306 )
306 )
307
307
308 upgrademessage = _('Revlog supports delta chain with more unused data '
308 upgrademessage = _('Revlog supports delta chain with more unused data '
309 'between payload. These gaps will be skipped at read '
309 'between payload. These gaps will be skipped at read '
310 'time. This allows for better delta chains, making a '
310 'time. This allows for better delta chains, making a '
311 'better compression and faster exchange with server.')
311 'better compression and faster exchange with server.')
312
312
313 @registerformatvariant
313 @registerformatvariant
314 class removecldeltachain(formatvariant):
314 class removecldeltachain(formatvariant):
315 name = 'plain-cl-delta'
315 name = 'plain-cl-delta'
316
316
317 default = True
317 default = True
318
318
319 description = _('changelog storage is using deltas instead of '
319 description = _('changelog storage is using deltas instead of '
320 'raw entries; changelog reading and any '
320 'raw entries; changelog reading and any '
321 'operation relying on changelog data are slower '
321 'operation relying on changelog data are slower '
322 'than they could be')
322 'than they could be')
323
323
324 upgrademessage = _('changelog storage will be reformated to '
324 upgrademessage = _('changelog storage will be reformated to '
325 'store raw entries; changelog reading will be '
325 'store raw entries; changelog reading will be '
326 'faster; changelog size may be reduced')
326 'faster; changelog size may be reduced')
327
327
328 @staticmethod
328 @staticmethod
329 def fromrepo(repo):
329 def fromrepo(repo):
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
331 # changelogs with deltas.
331 # changelogs with deltas.
332 cl = repo.changelog
332 cl = repo.changelog
333 chainbase = cl.chainbase
333 chainbase = cl.chainbase
334 return all(rev == chainbase(rev) for rev in cl)
334 return all(rev == chainbase(rev) for rev in cl)
335
335
336 @staticmethod
336 @staticmethod
337 def fromconfig(repo):
337 def fromconfig(repo):
338 return True
338 return True
339
339
340 @registerformatvariant
340 @registerformatvariant
341 class compressionengine(formatvariant):
341 class compressionengine(formatvariant):
342 name = 'compression'
342 name = 'compression'
343 default = 'zlib'
343 default = 'zlib'
344
344
345 description = _('Compresion algorithm used to compress data. '
345 description = _('Compresion algorithm used to compress data. '
346 'Some engine are faster than other')
346 'Some engine are faster than other')
347
347
348 upgrademessage = _('revlog content will be recompressed with the new '
348 upgrademessage = _('revlog content will be recompressed with the new '
349 'algorithm.')
349 'algorithm.')
350
350
351 @classmethod
351 @classmethod
352 def fromrepo(cls, repo):
352 def fromrepo(cls, repo):
353 # we allow multiple compression engine requirement to co-exist because
353 # we allow multiple compression engine requirement to co-exist because
354 # strickly speaking, revlog seems to support mixed compression style.
354 # strickly speaking, revlog seems to support mixed compression style.
355 #
355 #
356 # The compression used for new entries will be "the last one"
356 # The compression used for new entries will be "the last one"
357 compression = 'zlib'
357 compression = 'zlib'
358 for req in repo.requirements:
358 for req in repo.requirements:
359 prefix = req.startswith
359 prefix = req.startswith
360 if prefix('revlog-compression-') or prefix('exp-compression-'):
360 if prefix('revlog-compression-') or prefix('exp-compression-'):
361 compression = req.split('-', 2)[2]
361 compression = req.split('-', 2)[2]
362 return compression
362 return compression
363
363
364 @classmethod
364 @classmethod
365 def fromconfig(cls, repo):
365 def fromconfig(cls, repo):
366 return repo.ui.config('format', 'revlog-compression')
366 return repo.ui.config('format', 'revlog-compression')
367
367
368 @registerformatvariant
368 @registerformatvariant
369 class compressionlevel(formatvariant):
369 class compressionlevel(formatvariant):
370 name = 'compression-level'
370 name = 'compression-level'
371 default = 'default'
371 default = 'default'
372
372
373 description = _('compression level')
373 description = _('compression level')
374
374
375 upgrademessage = _('revlog content will be recompressed')
375 upgrademessage = _('revlog content will be recompressed')
376
376
377 @classmethod
377 @classmethod
378 def fromrepo(cls, repo):
378 def fromrepo(cls, repo):
379 comp = compressionengine.fromrepo(repo)
379 comp = compressionengine.fromrepo(repo)
380 level = None
380 level = None
381 if comp == 'zlib':
381 if comp == 'zlib':
382 level = repo.ui.configint('storage', 'revlog.zlib.level')
382 level = repo.ui.configint('storage', 'revlog.zlib.level')
383 elif comp == 'zstd':
383 elif comp == 'zstd':
384 level = repo.ui.configint('storage', 'revlog.zstd.level')
384 level = repo.ui.configint('storage', 'revlog.zstd.level')
385 if level is None:
385 if level is None:
386 return 'default'
386 return 'default'
387 return bytes(level)
387 return bytes(level)
388
388
389 @classmethod
389 @classmethod
390 def fromconfig(cls, repo):
390 def fromconfig(cls, repo):
391 comp = compressionengine.fromconfig(repo)
391 comp = compressionengine.fromconfig(repo)
392 level = None
392 level = None
393 if comp == 'zlib':
393 if comp == 'zlib':
394 level = repo.ui.configint('storage', 'revlog.zlib.level')
394 level = repo.ui.configint('storage', 'revlog.zlib.level')
395 elif comp == 'zstd':
395 elif comp == 'zstd':
396 level = repo.ui.configint('storage', 'revlog.zstd.level')
396 level = repo.ui.configint('storage', 'revlog.zstd.level')
397 if level is None:
397 if level is None:
398 return 'default'
398 return 'default'
399 return bytes(level)
399 return bytes(level)
400
400
401 def finddeficiencies(repo):
401 def finddeficiencies(repo):
402 """returns a list of deficiencies that the repo suffer from"""
402 """returns a list of deficiencies that the repo suffer from"""
403 deficiencies = []
403 deficiencies = []
404
404
405 # We could detect lack of revlogv1 and store here, but they were added
405 # We could detect lack of revlogv1 and store here, but they were added
406 # in 0.9.2 and we don't support upgrading repos without these
406 # in 0.9.2 and we don't support upgrading repos without these
407 # requirements, so let's not bother.
407 # requirements, so let's not bother.
408
408
409 for fv in allformatvariant:
409 for fv in allformatvariant:
410 if not fv.fromrepo(repo):
410 if not fv.fromrepo(repo):
411 deficiencies.append(fv)
411 deficiencies.append(fv)
412
412
413 return deficiencies
413 return deficiencies
414
414
415 # search without '-' to support older form on newer client.
415 # search without '-' to support older form on newer client.
416 #
416 #
417 # We don't enforce backward compatibility for debug command so this
417 # We don't enforce backward compatibility for debug command so this
418 # might eventually be dropped. However, having to use two different
418 # might eventually be dropped. However, having to use two different
419 # forms in script when comparing result is anoying enough to add
419 # forms in script when comparing result is anoying enough to add
420 # backward compatibility for a while.
420 # backward compatibility for a while.
421 legacy_opts_map = {
421 legacy_opts_map = {
422 'redeltaparent': 're-delta-parent',
422 'redeltaparent': 're-delta-parent',
423 'redeltamultibase': 're-delta-multibase',
423 'redeltamultibase': 're-delta-multibase',
424 'redeltaall': 're-delta-all',
424 'redeltaall': 're-delta-all',
425 'redeltafulladd': 're-delta-fulladd',
425 'redeltafulladd': 're-delta-fulladd',
426 }
426 }
427
427
428 def findoptimizations(repo):
428 def findoptimizations(repo):
429 """Determine optimisation that could be used during upgrade"""
429 """Determine optimisation that could be used during upgrade"""
430 # These are unconditionally added. There is logic later that figures out
430 # These are unconditionally added. There is logic later that figures out
431 # which ones to apply.
431 # which ones to apply.
432 optimizations = []
432 optimizations = []
433
433
434 optimizations.append(improvement(
434 optimizations.append(improvement(
435 name='re-delta-parent',
435 name='re-delta-parent',
436 type=optimisation,
436 type=optimisation,
437 description=_('deltas within internal storage will be recalculated to '
437 description=_('deltas within internal storage will be recalculated to '
438 'choose an optimal base revision where this was not '
438 'choose an optimal base revision where this was not '
439 'already done; the size of the repository may shrink and '
439 'already done; the size of the repository may shrink and '
440 'various operations may become faster; the first time '
440 'various operations may become faster; the first time '
441 'this optimization is performed could slow down upgrade '
441 'this optimization is performed could slow down upgrade '
442 'execution considerably; subsequent invocations should '
442 'execution considerably; subsequent invocations should '
443 'not run noticeably slower'),
443 'not run noticeably slower'),
444 upgrademessage=_('deltas within internal storage will choose a new '
444 upgrademessage=_('deltas within internal storage will choose a new '
445 'base revision if needed')))
445 'base revision if needed')))
446
446
447 optimizations.append(improvement(
447 optimizations.append(improvement(
448 name='re-delta-multibase',
448 name='re-delta-multibase',
449 type=optimisation,
449 type=optimisation,
450 description=_('deltas within internal storage will be recalculated '
450 description=_('deltas within internal storage will be recalculated '
451 'against multiple base revision and the smallest '
451 'against multiple base revision and the smallest '
452 'difference will be used; the size of the repository may '
452 'difference will be used; the size of the repository may '
453 'shrink significantly when there are many merges; this '
453 'shrink significantly when there are many merges; this '
454 'optimization will slow down execution in proportion to '
454 'optimization will slow down execution in proportion to '
455 'the number of merges in the repository and the amount '
455 'the number of merges in the repository and the amount '
456 'of files in the repository; this slow down should not '
456 'of files in the repository; this slow down should not '
457 'be significant unless there are tens of thousands of '
457 'be significant unless there are tens of thousands of '
458 'files and thousands of merges'),
458 'files and thousands of merges'),
459 upgrademessage=_('deltas within internal storage will choose an '
459 upgrademessage=_('deltas within internal storage will choose an '
460 'optimal delta by computing deltas against multiple '
460 'optimal delta by computing deltas against multiple '
461 'parents; may slow down execution time '
461 'parents; may slow down execution time '
462 'significantly')))
462 'significantly')))
463
463
464 optimizations.append(improvement(
464 optimizations.append(improvement(
465 name='re-delta-all',
465 name='re-delta-all',
466 type=optimisation,
466 type=optimisation,
467 description=_('deltas within internal storage will always be '
467 description=_('deltas within internal storage will always be '
468 'recalculated without reusing prior deltas; this will '
468 'recalculated without reusing prior deltas; this will '
469 'likely make execution run several times slower; this '
469 'likely make execution run several times slower; this '
470 'optimization is typically not needed'),
470 'optimization is typically not needed'),
471 upgrademessage=_('deltas within internal storage will be fully '
471 upgrademessage=_('deltas within internal storage will be fully '
472 'recomputed; this will likely drastically slow down '
472 'recomputed; this will likely drastically slow down '
473 'execution time')))
473 'execution time')))
474
474
475 optimizations.append(improvement(
475 optimizations.append(improvement(
476 name='re-delta-fulladd',
476 name='re-delta-fulladd',
477 type=optimisation,
477 type=optimisation,
478 description=_('every revision will be re-added as if it was new '
478 description=_('every revision will be re-added as if it was new '
479 'content. It will go through the full storage '
479 'content. It will go through the full storage '
480 'mechanism giving extensions a chance to process it '
480 'mechanism giving extensions a chance to process it '
481 '(eg. lfs). This is similar to "re-delta-all" but even '
481 '(eg. lfs). This is similar to "re-delta-all" but even '
482 'slower since more logic is involved.'),
482 'slower since more logic is involved.'),
483 upgrademessage=_('each revision will be added as new content to the '
483 upgrademessage=_('each revision will be added as new content to the '
484 'internal storage; this will likely drastically slow '
484 'internal storage; this will likely drastically slow '
485 'down execution time, but some extensions might need '
485 'down execution time, but some extensions might need '
486 'it')))
486 'it')))
487
487
488 return optimizations
488 return optimizations
489
489
490 def determineactions(repo, deficiencies, sourcereqs, destreqs):
490 def determineactions(repo, deficiencies, sourcereqs, destreqs):
491 """Determine upgrade actions that will be performed.
491 """Determine upgrade actions that will be performed.
492
492
493 Given a list of improvements as returned by ``finddeficiencies`` and
493 Given a list of improvements as returned by ``finddeficiencies`` and
494 ``findoptimizations``, determine the list of upgrade actions that
494 ``findoptimizations``, determine the list of upgrade actions that
495 will be performed.
495 will be performed.
496
496
497 The role of this function is to filter improvements if needed, apply
497 The role of this function is to filter improvements if needed, apply
498 recommended optimizations from the improvements list that make sense,
498 recommended optimizations from the improvements list that make sense,
499 etc.
499 etc.
500
500
501 Returns a list of action names.
501 Returns a list of action names.
502 """
502 """
503 newactions = []
503 newactions = []
504
504
505 knownreqs = supporteddestrequirements(repo)
505 knownreqs = supporteddestrequirements(repo)
506
506
507 for d in deficiencies:
507 for d in deficiencies:
508 name = d.name
508 name = d.name
509
509
510 # If the action is a requirement that doesn't show up in the
510 # If the action is a requirement that doesn't show up in the
511 # destination requirements, prune the action.
511 # destination requirements, prune the action.
512 if name in knownreqs and name not in destreqs:
512 if name in knownreqs and name not in destreqs:
513 continue
513 continue
514
514
515 newactions.append(d)
515 newactions.append(d)
516
516
517 # FUTURE consider adding some optimizations here for certain transitions.
517 # FUTURE consider adding some optimizations here for certain transitions.
518 # e.g. adding generaldelta could schedule parent redeltas.
518 # e.g. adding generaldelta could schedule parent redeltas.
519
519
520 return newactions
520 return newactions
521
521
522 def _revlogfrompath(repo, path):
522 def _revlogfrompath(repo, path):
523 """Obtain a revlog from a repo path.
523 """Obtain a revlog from a repo path.
524
524
525 An instance of the appropriate class is returned.
525 An instance of the appropriate class is returned.
526 """
526 """
527 if path == '00changelog.i':
527 if path == '00changelog.i':
528 return changelog.changelog(repo.svfs)
528 return changelog.changelog(repo.svfs)
529 elif path.endswith('00manifest.i'):
529 elif path.endswith('00manifest.i'):
530 mandir = path[:-len('00manifest.i')]
530 mandir = path[:-len('00manifest.i')]
531 return manifest.manifestrevlog(repo.svfs, tree=mandir)
531 return manifest.manifestrevlog(repo.svfs, tree=mandir)
532 else:
532 else:
533 #reverse of "/".join(("data", path + ".i"))
533 #reverse of "/".join(("data", path + ".i"))
534 return filelog.filelog(repo.svfs, path[5:-2])
534 return filelog.filelog(repo.svfs, path[5:-2])
535
535
536 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
536 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
537 """copy all relevant files for `oldrl` into `destrepo` store
537 """copy all relevant files for `oldrl` into `destrepo` store
538
538
539 Files are copied "as is" without any transformation. The copy is performed
539 Files are copied "as is" without any transformation. The copy is performed
540 without extra checks. Callers are responsible for making sure the copied
540 without extra checks. Callers are responsible for making sure the copied
541 content is compatible with format of the destination repository.
541 content is compatible with format of the destination repository.
542 """
542 """
543 oldrl = getattr(oldrl, '_revlog', oldrl)
543 oldrl = getattr(oldrl, '_revlog', oldrl)
544 newrl = _revlogfrompath(destrepo, unencodedname)
544 newrl = _revlogfrompath(destrepo, unencodedname)
545 newrl = getattr(newrl, '_revlog', newrl)
545 newrl = getattr(newrl, '_revlog', newrl)
546
546
547 oldvfs = oldrl.opener
547 oldvfs = oldrl.opener
548 newvfs = newrl.opener
548 newvfs = newrl.opener
549 oldindex = oldvfs.join(oldrl.indexfile)
549 oldindex = oldvfs.join(oldrl.indexfile)
550 newindex = newvfs.join(newrl.indexfile)
550 newindex = newvfs.join(newrl.indexfile)
551 olddata = oldvfs.join(oldrl.datafile)
551 olddata = oldvfs.join(oldrl.datafile)
552 newdata = newvfs.join(newrl.datafile)
552 newdata = newvfs.join(newrl.datafile)
553
553
554 newdir = newvfs.dirname(newrl.indexfile)
554 newdir = newvfs.dirname(newrl.indexfile)
555 newvfs.makedirs(newdir)
555 newvfs.makedirs(newdir)
556
556
557 util.copyfile(oldindex, newindex)
557 util.copyfile(oldindex, newindex)
558 if oldrl.opener.exists(olddata):
558 if oldrl.opener.exists(olddata):
559 util.copyfile(olddata, newdata)
559 util.copyfile(olddata, newdata)
560
560
561 if not (unencodedname.endswith('00changelog.i')
561 if not (unencodedname.endswith('00changelog.i')
562 or unencodedname.endswith('00manifest.i')):
562 or unencodedname.endswith('00manifest.i')):
563 destrepo.svfs.fncache.add(unencodedname)
563 destrepo.svfs.fncache.add(unencodedname)
564
564
565 def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
565 UPGRADE_CHANGELOG = object()
566 UPGRADE_MANIFEST = object()
567 UPGRADE_FILELOG = object()
568
569 UPGRADE_ALL_REVLOGS = frozenset([UPGRADE_CHANGELOG,
570 UPGRADE_MANIFEST,
571 UPGRADE_FILELOG])
572
573 def matchrevlog(revlogfilter, entry):
574 """check is a revlog is selected for cloning
575
576 The store entry is checked against the passed filter"""
577 if entry.endswith('00changelog.i'):
578 return UPGRADE_CHANGELOG in revlogfilter
579 elif entry.endswith('00manifest.i'):
580 return UPGRADE_MANIFEST in revlogfilter
581 return UPGRADE_FILELOG in revlogfilter
582
583 def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents,
584 revlogs=UPGRADE_ALL_REVLOGS):
566 """Copy revlogs between 2 repos."""
585 """Copy revlogs between 2 repos."""
567 revcount = 0
586 revcount = 0
568 srcsize = 0
587 srcsize = 0
569 srcrawsize = 0
588 srcrawsize = 0
570 dstsize = 0
589 dstsize = 0
571 fcount = 0
590 fcount = 0
572 frevcount = 0
591 frevcount = 0
573 fsrcsize = 0
592 fsrcsize = 0
574 frawsize = 0
593 frawsize = 0
575 fdstsize = 0
594 fdstsize = 0
576 mcount = 0
595 mcount = 0
577 mrevcount = 0
596 mrevcount = 0
578 msrcsize = 0
597 msrcsize = 0
579 mrawsize = 0
598 mrawsize = 0
580 mdstsize = 0
599 mdstsize = 0
581 crevcount = 0
600 crevcount = 0
582 csrcsize = 0
601 csrcsize = 0
583 crawsize = 0
602 crawsize = 0
584 cdstsize = 0
603 cdstsize = 0
585
604
586 alldatafiles = list(srcrepo.store.walk())
605 alldatafiles = list(srcrepo.store.walk())
587
606
588 # Perform a pass to collect metadata. This validates we can open all
607 # Perform a pass to collect metadata. This validates we can open all
589 # source files and allows a unified progress bar to be displayed.
608 # source files and allows a unified progress bar to be displayed.
590 for unencoded, encoded, size in alldatafiles:
609 for unencoded, encoded, size in alldatafiles:
591 if unencoded.endswith('.d'):
610 if unencoded.endswith('.d'):
592 continue
611 continue
593
612
594 rl = _revlogfrompath(srcrepo, unencoded)
613 rl = _revlogfrompath(srcrepo, unencoded)
595
614
596 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
615 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
597 trackedsize=True, storedsize=True)
616 trackedsize=True, storedsize=True)
598
617
599 revcount += info['revisionscount'] or 0
618 revcount += info['revisionscount'] or 0
600 datasize = info['storedsize'] or 0
619 datasize = info['storedsize'] or 0
601 rawsize = info['trackedsize'] or 0
620 rawsize = info['trackedsize'] or 0
602
621
603 srcsize += datasize
622 srcsize += datasize
604 srcrawsize += rawsize
623 srcrawsize += rawsize
605
624
606 # This is for the separate progress bars.
625 # This is for the separate progress bars.
607 if isinstance(rl, changelog.changelog):
626 if isinstance(rl, changelog.changelog):
608 crevcount += len(rl)
627 crevcount += len(rl)
609 csrcsize += datasize
628 csrcsize += datasize
610 crawsize += rawsize
629 crawsize += rawsize
611 elif isinstance(rl, manifest.manifestrevlog):
630 elif isinstance(rl, manifest.manifestrevlog):
612 mcount += 1
631 mcount += 1
613 mrevcount += len(rl)
632 mrevcount += len(rl)
614 msrcsize += datasize
633 msrcsize += datasize
615 mrawsize += rawsize
634 mrawsize += rawsize
616 elif isinstance(rl, filelog.filelog):
635 elif isinstance(rl, filelog.filelog):
617 fcount += 1
636 fcount += 1
618 frevcount += len(rl)
637 frevcount += len(rl)
619 fsrcsize += datasize
638 fsrcsize += datasize
620 frawsize += rawsize
639 frawsize += rawsize
621 else:
640 else:
622 error.ProgrammingError('unknown revlog type')
641 error.ProgrammingError('unknown revlog type')
623
642
624 if not revcount:
643 if not revcount:
625 return
644 return
626
645
627 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
646 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
628 '%d in changelog)\n') %
647 '%d in changelog)\n') %
629 (revcount, frevcount, mrevcount, crevcount))
648 (revcount, frevcount, mrevcount, crevcount))
630 ui.write(_('migrating %s in store; %s tracked data\n') % (
649 ui.write(_('migrating %s in store; %s tracked data\n') % (
631 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
650 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
632
651
633 # Used to keep track of progress.
652 # Used to keep track of progress.
634 progress = None
653 progress = None
635 def oncopiedrevision(rl, rev, node):
654 def oncopiedrevision(rl, rev, node):
636 progress.increment()
655 progress.increment()
637
656
638 # Do the actual copying.
657 # Do the actual copying.
639 # FUTURE this operation can be farmed off to worker processes.
658 # FUTURE this operation can be farmed off to worker processes.
640 seen = set()
659 seen = set()
641 for unencoded, encoded, size in alldatafiles:
660 for unencoded, encoded, size in alldatafiles:
642 if unencoded.endswith('.d'):
661 if unencoded.endswith('.d'):
643 continue
662 continue
644
663
645 oldrl = _revlogfrompath(srcrepo, unencoded)
664 oldrl = _revlogfrompath(srcrepo, unencoded)
646 newrl = _revlogfrompath(dstrepo, unencoded)
647
665
648 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
666 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
649 ui.write(_('finished migrating %d manifest revisions across %d '
667 ui.write(_('finished migrating %d manifest revisions across %d '
650 'manifests; change in size: %s\n') %
668 'manifests; change in size: %s\n') %
651 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
669 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
652
670
653 ui.write(_('migrating changelog containing %d revisions '
671 ui.write(_('migrating changelog containing %d revisions '
654 '(%s in store; %s tracked data)\n') %
672 '(%s in store; %s tracked data)\n') %
655 (crevcount, util.bytecount(csrcsize),
673 (crevcount, util.bytecount(csrcsize),
656 util.bytecount(crawsize)))
674 util.bytecount(crawsize)))
657 seen.add('c')
675 seen.add('c')
658 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
676 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
659 total=crevcount)
677 total=crevcount)
660 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
678 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
661 ui.write(_('finished migrating %d filelog revisions across %d '
679 ui.write(_('finished migrating %d filelog revisions across %d '
662 'filelogs; change in size: %s\n') %
680 'filelogs; change in size: %s\n') %
663 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
681 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
664
682
665 ui.write(_('migrating %d manifests containing %d revisions '
683 ui.write(_('migrating %d manifests containing %d revisions '
666 '(%s in store; %s tracked data)\n') %
684 '(%s in store; %s tracked data)\n') %
667 (mcount, mrevcount, util.bytecount(msrcsize),
685 (mcount, mrevcount, util.bytecount(msrcsize),
668 util.bytecount(mrawsize)))
686 util.bytecount(mrawsize)))
669 seen.add('m')
687 seen.add('m')
670 if progress:
688 if progress:
671 progress.complete()
689 progress.complete()
672 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
690 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
673 total=mrevcount)
691 total=mrevcount)
674 elif 'f' not in seen:
692 elif 'f' not in seen:
675 ui.write(_('migrating %d filelogs containing %d revisions '
693 ui.write(_('migrating %d filelogs containing %d revisions '
676 '(%s in store; %s tracked data)\n') %
694 '(%s in store; %s tracked data)\n') %
677 (fcount, frevcount, util.bytecount(fsrcsize),
695 (fcount, frevcount, util.bytecount(fsrcsize),
678 util.bytecount(frawsize)))
696 util.bytecount(frawsize)))
679 seen.add('f')
697 seen.add('f')
680 if progress:
698 if progress:
681 progress.complete()
699 progress.complete()
682 progress = srcrepo.ui.makeprogress(_('file revisions'),
700 progress = srcrepo.ui.makeprogress(_('file revisions'),
683 total=frevcount)
701 total=frevcount)
684
702
685
703 if matchrevlog(revlogs, unencoded):
686 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
704 ui.note(_('cloning %d revisions from %s\n')
705 % (len(oldrl), unencoded))
706 newrl = _revlogfrompath(dstrepo, unencoded)
687 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
707 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
688 deltareuse=deltareuse,
708 deltareuse=deltareuse,
689 forcedeltabothparents=forcedeltabothparents)
709 forcedeltabothparents=forcedeltabothparents)
710 else:
711 msg = _('blindly copying %s containing %i revisions\n')
712 ui.note(msg % (unencoded, len(oldrl)))
713 _copyrevlog(tr, dstrepo, oldrl, unencoded)
714
715 newrl = _revlogfrompath(dstrepo, unencoded)
690
716
691 info = newrl.storageinfo(storedsize=True)
717 info = newrl.storageinfo(storedsize=True)
692 datasize = info['storedsize'] or 0
718 datasize = info['storedsize'] or 0
693
719
694 dstsize += datasize
720 dstsize += datasize
695
721
696 if isinstance(newrl, changelog.changelog):
722 if isinstance(newrl, changelog.changelog):
697 cdstsize += datasize
723 cdstsize += datasize
698 elif isinstance(newrl, manifest.manifestrevlog):
724 elif isinstance(newrl, manifest.manifestrevlog):
699 mdstsize += datasize
725 mdstsize += datasize
700 else:
726 else:
701 fdstsize += datasize
727 fdstsize += datasize
702
728
703 progress.complete()
729 progress.complete()
704
730
705 ui.write(_('finished migrating %d changelog revisions; change in size: '
731 ui.write(_('finished migrating %d changelog revisions; change in size: '
706 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
732 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
707
733
708 ui.write(_('finished migrating %d total revisions; total change in store '
734 ui.write(_('finished migrating %d total revisions; total change in store '
709 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
735 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
710
736
711 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
737 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
712 """Determine whether to copy a store file during upgrade.
738 """Determine whether to copy a store file during upgrade.
713
739
714 This function is called when migrating store files from ``srcrepo`` to
740 This function is called when migrating store files from ``srcrepo`` to
715 ``dstrepo`` as part of upgrading a repository.
741 ``dstrepo`` as part of upgrading a repository.
716
742
717 Args:
743 Args:
718 srcrepo: repo we are copying from
744 srcrepo: repo we are copying from
719 dstrepo: repo we are copying to
745 dstrepo: repo we are copying to
720 requirements: set of requirements for ``dstrepo``
746 requirements: set of requirements for ``dstrepo``
721 path: store file being examined
747 path: store file being examined
722 mode: the ``ST_MODE`` file type of ``path``
748 mode: the ``ST_MODE`` file type of ``path``
723 st: ``stat`` data structure for ``path``
749 st: ``stat`` data structure for ``path``
724
750
725 Function should return ``True`` if the file is to be copied.
751 Function should return ``True`` if the file is to be copied.
726 """
752 """
727 # Skip revlogs.
753 # Skip revlogs.
728 if path.endswith(('.i', '.d')):
754 if path.endswith(('.i', '.d')):
729 return False
755 return False
730 # Skip transaction related files.
756 # Skip transaction related files.
731 if path.startswith('undo'):
757 if path.startswith('undo'):
732 return False
758 return False
733 # Only copy regular files.
759 # Only copy regular files.
734 if mode != stat.S_IFREG:
760 if mode != stat.S_IFREG:
735 return False
761 return False
736 # Skip other skipped files.
762 # Skip other skipped files.
737 if path in ('lock', 'fncache'):
763 if path in ('lock', 'fncache'):
738 return False
764 return False
739
765
740 return True
766 return True
741
767
742 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
768 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
743 """Hook point for extensions to perform additional actions during upgrade.
769 """Hook point for extensions to perform additional actions during upgrade.
744
770
745 This function is called after revlogs and store files have been copied but
771 This function is called after revlogs and store files have been copied but
746 before the new store is swapped into the original location.
772 before the new store is swapped into the original location.
747 """
773 """
748
774
749 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
775 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions,
776 revlogs=UPGRADE_ALL_REVLOGS):
750 """Do the low-level work of upgrading a repository.
777 """Do the low-level work of upgrading a repository.
751
778
752 The upgrade is effectively performed as a copy between a source
779 The upgrade is effectively performed as a copy between a source
753 repository and a temporary destination repository.
780 repository and a temporary destination repository.
754
781
755 The source repository is unmodified for as long as possible so the
782 The source repository is unmodified for as long as possible so the
756 upgrade can abort at any time without causing loss of service for
783 upgrade can abort at any time without causing loss of service for
757 readers and without corrupting the source repository.
784 readers and without corrupting the source repository.
758 """
785 """
759 assert srcrepo.currentwlock()
786 assert srcrepo.currentwlock()
760 assert dstrepo.currentwlock()
787 assert dstrepo.currentwlock()
761
788
762 ui.write(_('(it is safe to interrupt this process any time before '
789 ui.write(_('(it is safe to interrupt this process any time before '
763 'data migration completes)\n'))
790 'data migration completes)\n'))
764
791
765 if 're-delta-all' in actions:
792 if 're-delta-all' in actions:
766 deltareuse = revlog.revlog.DELTAREUSENEVER
793 deltareuse = revlog.revlog.DELTAREUSENEVER
767 elif 're-delta-parent' in actions:
794 elif 're-delta-parent' in actions:
768 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
795 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
769 elif 're-delta-multibase' in actions:
796 elif 're-delta-multibase' in actions:
770 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
797 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
771 elif 're-delta-fulladd' in actions:
798 elif 're-delta-fulladd' in actions:
772 deltareuse = revlog.revlog.DELTAREUSEFULLADD
799 deltareuse = revlog.revlog.DELTAREUSEFULLADD
773 else:
800 else:
774 deltareuse = revlog.revlog.DELTAREUSEALWAYS
801 deltareuse = revlog.revlog.DELTAREUSEALWAYS
775
802
776 with dstrepo.transaction('upgrade') as tr:
803 with dstrepo.transaction('upgrade') as tr:
777 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
804 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
778 're-delta-multibase' in actions)
805 're-delta-multibase' in actions, revlogs=revlogs)
779
806
780 # Now copy other files in the store directory.
807 # Now copy other files in the store directory.
781 # The sorted() makes execution deterministic.
808 # The sorted() makes execution deterministic.
782 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
809 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
783 if not _filterstorefile(srcrepo, dstrepo, requirements,
810 if not _filterstorefile(srcrepo, dstrepo, requirements,
784 p, kind, st):
811 p, kind, st):
785 continue
812 continue
786
813
787 srcrepo.ui.write(_('copying %s\n') % p)
814 srcrepo.ui.write(_('copying %s\n') % p)
788 src = srcrepo.store.rawvfs.join(p)
815 src = srcrepo.store.rawvfs.join(p)
789 dst = dstrepo.store.rawvfs.join(p)
816 dst = dstrepo.store.rawvfs.join(p)
790 util.copyfile(src, dst, copystat=True)
817 util.copyfile(src, dst, copystat=True)
791
818
792 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
819 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
793
820
794 ui.write(_('data fully migrated to temporary repository\n'))
821 ui.write(_('data fully migrated to temporary repository\n'))
795
822
    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | {'upgradeinprogress'})

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
    """Upgrade a repository in place."""
    if optimize is None:
        optimize = []
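    # Translate any legacy optimization names passed by the caller to their
    # current equivalents before matching them against known actions.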
    optimize = set(legacy_opts_map.get(o, o) for o in optimize)
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui))
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))
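
    # Turn the detected deficiencies into concrete upgrade actions, then
    # append any explicitly requested optimizations not already scheduled.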
    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_(' preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_(' removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_(' added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for a in actions:
            ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))

    if not run:
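        # Dry-run mode: describe what an upgrade would do without modifying
        # the repository, then return.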
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions)
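            # When the caller asked for no backup (backup=False) and a backup
            # directory was actually created, remove it right away.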
            if not (backup or backuppath is None):
                ui.write(_('removing old repository content%s\n') % backuppath)
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
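
upgraderepo() above is normally reached through the hg debugupgraderepo command, which forwards its options to this function, but it can also be driven directly as a library call. Below is a minimal, hypothetical sketch of such a driver; the repository path is a placeholder, the 're-delta-parent' optimization name is used only as an example, and bytes literals are used because Mercurial's internal API works with bytes on Python 3.

# Hypothetical driver script -- not part of upgrade.py.
from mercurial import hg, ui as uimod, upgrade

ui = uimod.ui.load()                        # a ui loaded with the user's config
repo = hg.repository(ui, b'/path/to/repo')  # placeholder path

# First pass: run=False only reports the requirement changes and the
# actions that an actual run would perform.
upgrade.upgraderepo(ui, repo, run=False)

# Second pass: perform the upgrade, requesting one optional optimization
# (example name) and keeping the backup of the old store.
upgrade.upgraderepo(ui, repo, run=True,
                    optimize=[b're-delta-parent'], backup=True)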