upgrade: introduce a _copyrevlog method...
marmoute
r42918:5535a220 default
@@ -1,982 +1,1011
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
27 from .utils import (
27 from .utils import (
28 compression,
28 compression,
29 )
29 )
30
30
31 def requiredsourcerequirements(repo):
31 def requiredsourcerequirements(repo):
32 """Obtain requirements required to be present to upgrade a repo.
32 """Obtain requirements required to be present to upgrade a repo.
33
33
34 An upgrade will not be allowed if the repository doesn't have the
34 An upgrade will not be allowed if the repository doesn't have the
35 requirements returned by this function.
35 requirements returned by this function.
36 """
36 """
37 return {
37 return {
38 # Introduced in Mercurial 0.9.2.
38 # Introduced in Mercurial 0.9.2.
39 'revlogv1',
39 'revlogv1',
40 # Introduced in Mercurial 0.9.2.
40 # Introduced in Mercurial 0.9.2.
41 'store',
41 'store',
42 }
42 }
43
43
44 def blocksourcerequirements(repo):
44 def blocksourcerequirements(repo):
45 """Obtain requirements that will prevent an upgrade from occurring.
45 """Obtain requirements that will prevent an upgrade from occurring.
46
46
47 An upgrade cannot be performed if the source repository contains a
47 An upgrade cannot be performed if the source repository contains a
48 requirement in the returned set.
48 requirement in the returned set.
49 """
49 """
50 return {
50 return {
51 # The upgrade code does not yet support these experimental features.
51 # The upgrade code does not yet support these experimental features.
52 # This is an artificial limitation.
52 # This is an artificial limitation.
53 'treemanifest',
53 'treemanifest',
54 # This was a precursor to generaldelta and was never enabled by default.
54 # This was a precursor to generaldelta and was never enabled by default.
55 # It should (hopefully) not exist in the wild.
55 # It should (hopefully) not exist in the wild.
56 'parentdelta',
56 'parentdelta',
57 # Upgrade should operate on the actual store, not the shared link.
57 # Upgrade should operate on the actual store, not the shared link.
58 'shared',
58 'shared',
59 }
59 }
60
60
61 def supportremovedrequirements(repo):
61 def supportremovedrequirements(repo):
62 """Obtain requirements that can be removed during an upgrade.
62 """Obtain requirements that can be removed during an upgrade.
63
63
64 If an upgrade were to create a repository that dropped a requirement,
64 If an upgrade were to create a repository that dropped a requirement,
65 the dropped requirement must appear in the returned set for the upgrade
65 the dropped requirement must appear in the returned set for the upgrade
66 to be allowed.
66 to be allowed.
67 """
67 """
68 supported = {
68 supported = {
69 localrepo.SPARSEREVLOG_REQUIREMENT,
69 localrepo.SPARSEREVLOG_REQUIREMENT,
70 }
70 }
71 for name in compression.compengines:
71 for name in compression.compengines:
72 engine = compression.compengines[name]
72 engine = compression.compengines[name]
73 if engine.available() and engine.revlogheader():
73 if engine.available() and engine.revlogheader():
74 supported.add(b'exp-compression-%s' % name)
74 supported.add(b'exp-compression-%s' % name)
75 if engine.name() == 'zstd':
75 if engine.name() == 'zstd':
76 supported.add(b'revlog-compression-zstd')
76 supported.add(b'revlog-compression-zstd')
77 return supported
77 return supported
78
78
79 def supporteddestrequirements(repo):
79 def supporteddestrequirements(repo):
80 """Obtain requirements that upgrade supports in the destination.
80 """Obtain requirements that upgrade supports in the destination.
81
81
82 If the result of the upgrade would create requirements not in this set,
82 If the result of the upgrade would create requirements not in this set,
83 the upgrade is disallowed.
83 the upgrade is disallowed.
84
84
85 Extensions should monkeypatch this to add their custom requirements.
85 Extensions should monkeypatch this to add their custom requirements.
86 """
86 """
87 supported = {
87 supported = {
88 'dotencode',
88 'dotencode',
89 'fncache',
89 'fncache',
90 'generaldelta',
90 'generaldelta',
91 'revlogv1',
91 'revlogv1',
92 'store',
92 'store',
93 localrepo.SPARSEREVLOG_REQUIREMENT,
93 localrepo.SPARSEREVLOG_REQUIREMENT,
94 }
94 }
95 for name in compression.compengines:
95 for name in compression.compengines:
96 engine = compression.compengines[name]
96 engine = compression.compengines[name]
97 if engine.available() and engine.revlogheader():
97 if engine.available() and engine.revlogheader():
98 supported.add(b'exp-compression-%s' % name)
98 supported.add(b'exp-compression-%s' % name)
99 if engine.name() == 'zstd':
99 if engine.name() == 'zstd':
100 supported.add(b'revlog-compression-zstd')
100 supported.add(b'revlog-compression-zstd')
101 return supported
101 return supported
102
102
103 def allowednewrequirements(repo):
103 def allowednewrequirements(repo):
104 """Obtain requirements that can be added to a repository during upgrade.
104 """Obtain requirements that can be added to a repository during upgrade.
105
105
106 This is used to disallow proposed requirements from being added when
106 This is used to disallow proposed requirements from being added when
107 they weren't present before.
107 they weren't present before.
108
108
109 We use a list of allowed requirement additions instead of a list of known
109 We use a list of allowed requirement additions instead of a list of known
110 bad additions because the whitelist approach is safer and will prevent
110 bad additions because the whitelist approach is safer and will prevent
111 future, unknown requirements from accidentally being added.
111 future, unknown requirements from accidentally being added.
112 """
112 """
113 supported = {
113 supported = {
114 'dotencode',
114 'dotencode',
115 'fncache',
115 'fncache',
116 'generaldelta',
116 'generaldelta',
117 localrepo.SPARSEREVLOG_REQUIREMENT,
117 localrepo.SPARSEREVLOG_REQUIREMENT,
118 }
118 }
119 for name in compression.compengines:
119 for name in compression.compengines:
120 engine = compression.compengines[name]
120 engine = compression.compengines[name]
121 if engine.available() and engine.revlogheader():
121 if engine.available() and engine.revlogheader():
122 supported.add(b'exp-compression-%s' % name)
122 supported.add(b'exp-compression-%s' % name)
123 if engine.name() == 'zstd':
123 if engine.name() == 'zstd':
124 supported.add(b'revlog-compression-zstd')
124 supported.add(b'revlog-compression-zstd')
125 return supported
125 return supported
126
126
127 def preservedrequirements(repo):
127 def preservedrequirements(repo):
128 return set()
128 return set()
129
129
130 deficiency = 'deficiency'
130 deficiency = 'deficiency'
131 optimisation = 'optimization'
131 optimisation = 'optimization'
132
132
133 class improvement(object):
133 class improvement(object):
134 """Represents an improvement that can be made as part of an upgrade.
134 """Represents an improvement that can be made as part of an upgrade.
135
135
136 The following attributes are defined on each instance:
136 The following attributes are defined on each instance:
137
137
138 name
138 name
139 Machine-readable string uniquely identifying this improvement. It
139 Machine-readable string uniquely identifying this improvement. It
140 will be mapped to an action later in the upgrade process.
140 will be mapped to an action later in the upgrade process.
141
141
142 type
142 type
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
143 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
144 problem. An optimization is an action (sometimes optional) that
144 problem. An optimization is an action (sometimes optional) that
145 can be taken to further improve the state of the repository.
145 can be taken to further improve the state of the repository.
146
146
147 description
147 description
148 Message intended for humans explaining the improvement in more detail,
148 Message intended for humans explaining the improvement in more detail,
149 including the implications of it. For ``deficiency`` types, should be
149 including the implications of it. For ``deficiency`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
150 worded in the present tense. For ``optimisation`` types, should be
151 worded in the future tense.
151 worded in the future tense.
152
152
153 upgrademessage
153 upgrademessage
154 Message intended for humans explaining what an upgrade addressing this
154 Message intended for humans explaining what an upgrade addressing this
155 issue will do. Should be worded in the future tense.
155 issue will do. Should be worded in the future tense.
156 """
156 """
157 def __init__(self, name, type, description, upgrademessage):
157 def __init__(self, name, type, description, upgrademessage):
158 self.name = name
158 self.name = name
159 self.type = type
159 self.type = type
160 self.description = description
160 self.description = description
161 self.upgrademessage = upgrademessage
161 self.upgrademessage = upgrademessage
162
162
163 def __eq__(self, other):
163 def __eq__(self, other):
164 if not isinstance(other, improvement):
164 if not isinstance(other, improvement):
165 # This is what Python tells us to do
165 # This is what Python tells us to do
166 return NotImplemented
166 return NotImplemented
167 return self.name == other.name
167 return self.name == other.name
168
168
169 def __ne__(self, other):
169 def __ne__(self, other):
170 return not (self == other)
170 return not (self == other)
171
171
172 def __hash__(self):
172 def __hash__(self):
173 return hash(self.name)
173 return hash(self.name)
174
174
175 allformatvariant = []
175 allformatvariant = []
176
176
177 def registerformatvariant(cls):
177 def registerformatvariant(cls):
178 allformatvariant.append(cls)
178 allformatvariant.append(cls)
179 return cls
179 return cls
180
180
181 class formatvariant(improvement):
181 class formatvariant(improvement):
182 """an improvement subclass dedicated to repository format"""
182 """an improvement subclass dedicated to repository format"""
183 type = deficiency
183 type = deficiency
184 ### The following attributes should be defined for each class:
184 ### The following attributes should be defined for each class:
185
185
186 # machine-readable string uniquely identifying this improvement. it will be
186 # machine-readable string uniquely identifying this improvement. it will be
187 # mapped to an action later in the upgrade process.
187 # mapped to an action later in the upgrade process.
188 name = None
188 name = None
189
189
190 # message intended for humans explaining the improvement in more detail,
190 # message intended for humans explaining the improvement in more detail,
191 # including the implications of it. For ``deficiency`` types, should be worded
191 # including the implications of it. For ``deficiency`` types, should be worded
192 # in the present tense.
192 # in the present tense.
193 description = None
193 description = None
194
194
195 # message intended for humans explaining what an upgrade addressing this
195 # message intended for humans explaining what an upgrade addressing this
196 # issue will do. should be worded in the future tense.
196 # issue will do. should be worded in the future tense.
197 upgrademessage = None
197 upgrademessage = None
198
198
199 # value of current Mercurial default for new repository
199 # value of current Mercurial default for new repository
200 default = None
200 default = None
201
201
202 def __init__(self):
202 def __init__(self):
203 raise NotImplementedError()
203 raise NotImplementedError()
204
204
205 @staticmethod
205 @staticmethod
206 def fromrepo(repo):
206 def fromrepo(repo):
207 """current value of the variant in the repository"""
207 """current value of the variant in the repository"""
208 raise NotImplementedError()
208 raise NotImplementedError()
209
209
210 @staticmethod
210 @staticmethod
211 def fromconfig(repo):
211 def fromconfig(repo):
212 """current value of the variant in the configuration"""
212 """current value of the variant in the configuration"""
213 raise NotImplementedError()
213 raise NotImplementedError()
214
214
215 class requirementformatvariant(formatvariant):
215 class requirementformatvariant(formatvariant):
216 """formatvariant based on a 'requirement' name.
216 """formatvariant based on a 'requirement' name.
217
217
218 Many format variants are controlled by a 'requirement'. We define a small
218 Many format variants are controlled by a 'requirement'. We define a small
219 subclass to factor out the code.
219 subclass to factor out the code.
220 """
220 """
221
221
222 # the requirement that controls this format variant
222 # the requirement that controls this format variant
223 _requirement = None
223 _requirement = None
224
224
225 @staticmethod
225 @staticmethod
226 def _newreporequirements(ui):
226 def _newreporequirements(ui):
227 return localrepo.newreporequirements(
227 return localrepo.newreporequirements(
228 ui, localrepo.defaultcreateopts(ui))
228 ui, localrepo.defaultcreateopts(ui))
229
229
230 @classmethod
230 @classmethod
231 def fromrepo(cls, repo):
231 def fromrepo(cls, repo):
232 assert cls._requirement is not None
232 assert cls._requirement is not None
233 return cls._requirement in repo.requirements
233 return cls._requirement in repo.requirements
234
234
235 @classmethod
235 @classmethod
236 def fromconfig(cls, repo):
236 def fromconfig(cls, repo):
237 assert cls._requirement is not None
237 assert cls._requirement is not None
238 return cls._requirement in cls._newreporequirements(repo.ui)
238 return cls._requirement in cls._newreporequirements(repo.ui)
239
239
240 @registerformatvariant
240 @registerformatvariant
241 class fncache(requirementformatvariant):
241 class fncache(requirementformatvariant):
242 name = 'fncache'
242 name = 'fncache'
243
243
244 _requirement = 'fncache'
244 _requirement = 'fncache'
245
245
246 default = True
246 default = True
247
247
248 description = _('long and reserved filenames may not work correctly; '
248 description = _('long and reserved filenames may not work correctly; '
249 'repository performance is sub-optimal')
249 'repository performance is sub-optimal')
250
250
251 upgrademessage = _('repository will be more resilient to storing '
251 upgrademessage = _('repository will be more resilient to storing '
252 'certain paths and performance of certain '
252 'certain paths and performance of certain '
253 'operations should be improved')
253 'operations should be improved')
254
254
255 @registerformatvariant
255 @registerformatvariant
256 class dotencode(requirementformatvariant):
256 class dotencode(requirementformatvariant):
257 name = 'dotencode'
257 name = 'dotencode'
258
258
259 _requirement = 'dotencode'
259 _requirement = 'dotencode'
260
260
261 default = True
261 default = True
262
262
263 description = _('storage of filenames beginning with a period or '
263 description = _('storage of filenames beginning with a period or '
264 'space may not work correctly')
264 'space may not work correctly')
265
265
266 upgrademessage = _('repository will be better able to store files '
266 upgrademessage = _('repository will be better able to store files '
267 'beginning with a space or period')
267 'beginning with a space or period')
268
268
269 @registerformatvariant
269 @registerformatvariant
270 class generaldelta(requirementformatvariant):
270 class generaldelta(requirementformatvariant):
271 name = 'generaldelta'
271 name = 'generaldelta'
272
272
273 _requirement = 'generaldelta'
273 _requirement = 'generaldelta'
274
274
275 default = True
275 default = True
276
276
277 description = _('deltas within internal storage are unable to '
277 description = _('deltas within internal storage are unable to '
278 'choose optimal revisions; repository is larger and '
278 'choose optimal revisions; repository is larger and '
279 'slower than it could be; interaction with other '
279 'slower than it could be; interaction with other '
280 'repositories may require extra network and CPU '
280 'repositories may require extra network and CPU '
281 'resources, making "hg push" and "hg pull" slower')
281 'resources, making "hg push" and "hg pull" slower')
282
282
283 upgrademessage = _('repository storage will be able to create '
283 upgrademessage = _('repository storage will be able to create '
284 'optimal deltas; new repository data will be '
284 'optimal deltas; new repository data will be '
285 'smaller and read times should decrease; '
285 'smaller and read times should decrease; '
286 'interacting with other repositories using this '
286 'interacting with other repositories using this '
287 'storage model should require less network and '
287 'storage model should require less network and '
288 'CPU resources, making "hg push" and "hg pull" '
288 'CPU resources, making "hg push" and "hg pull" '
289 'faster')
289 'faster')
290
290
291 @registerformatvariant
291 @registerformatvariant
292 class sparserevlog(requirementformatvariant):
292 class sparserevlog(requirementformatvariant):
293 name = 'sparserevlog'
293 name = 'sparserevlog'
294
294
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
295 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
296
296
297 default = True
297 default = True
298
298
299 description = _('in order to limit disk reading and memory usage on older '
299 description = _('in order to limit disk reading and memory usage on older '
300 'versions, the span of a delta chain from its root to its '
300 'versions, the span of a delta chain from its root to its '
301 'end is limited, regardless of the relevant data in this span. '
301 'end is limited, regardless of the relevant data in this span. '
302 'This can severely limit the ability of Mercurial to build '
302 'This can severely limit the ability of Mercurial to build '
303 'good delta chains, resulting in much more storage space '
303 'good delta chains, resulting in much more storage space '
304 'being used and limiting the reusability of on-disk deltas '
304 'being used and limiting the reusability of on-disk deltas '
305 'during exchange.'
305 'during exchange.'
306 )
306 )
307
307
308 upgrademessage = _('Revlog supports delta chains with more unused data '
308 upgrademessage = _('Revlog supports delta chains with more unused data '
309 'between payloads. These gaps will be skipped at read '
309 'between payloads. These gaps will be skipped at read '
310 'time. This allows for better delta chains, producing '
310 'time. This allows for better delta chains, producing '
311 'better compression and faster exchange with the server.')
311 'better compression and faster exchange with the server.')
312
312
313 @registerformatvariant
313 @registerformatvariant
314 class removecldeltachain(formatvariant):
314 class removecldeltachain(formatvariant):
315 name = 'plain-cl-delta'
315 name = 'plain-cl-delta'
316
316
317 default = True
317 default = True
318
318
319 description = _('changelog storage is using deltas instead of '
319 description = _('changelog storage is using deltas instead of '
320 'raw entries; changelog reading and any '
320 'raw entries; changelog reading and any '
321 'operation relying on changelog data are slower '
321 'operation relying on changelog data are slower '
322 'than they could be')
322 'than they could be')
323
323
324 upgrademessage = _('changelog storage will be reformatted to '
324 upgrademessage = _('changelog storage will be reformatted to '
325 'store raw entries; changelog reading will be '
325 'store raw entries; changelog reading will be '
326 'faster; changelog size may be reduced')
326 'faster; changelog size may be reduced')
327
327
328 @staticmethod
328 @staticmethod
329 def fromrepo(repo):
329 def fromrepo(repo):
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
330 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
331 # changelogs with deltas.
331 # changelogs with deltas.
332 cl = repo.changelog
332 cl = repo.changelog
333 chainbase = cl.chainbase
333 chainbase = cl.chainbase
334 return all(rev == chainbase(rev) for rev in cl)
334 return all(rev == chainbase(rev) for rev in cl)
335
335
336 @staticmethod
336 @staticmethod
337 def fromconfig(repo):
337 def fromconfig(repo):
338 return True
338 return True
339
339
340 @registerformatvariant
340 @registerformatvariant
341 class compressionengine(formatvariant):
341 class compressionengine(formatvariant):
342 name = 'compression'
342 name = 'compression'
343 default = 'zlib'
343 default = 'zlib'
344
344
345 description = _('Compression algorithm used to compress data. '
345 description = _('Compression algorithm used to compress data. '
346 'Some engines are faster than others')
346 'Some engines are faster than others')
347
347
348 upgrademessage = _('revlog content will be recompressed with the new '
348 upgrademessage = _('revlog content will be recompressed with the new '
349 'algorithm.')
349 'algorithm.')
350
350
351 @classmethod
351 @classmethod
352 def fromrepo(cls, repo):
352 def fromrepo(cls, repo):
353 # we allow multiple compression engine requirements to co-exist because
353 # we allow multiple compression engine requirements to co-exist because
354 # strictly speaking, revlog seems to support mixed compression styles.
354 # strictly speaking, revlog seems to support mixed compression styles.
355 #
355 #
356 # The compression used for new entries will be "the last one"
356 # The compression used for new entries will be "the last one"
357 compression = 'zlib'
357 compression = 'zlib'
358 for req in repo.requirements:
358 for req in repo.requirements:
359 prefix = req.startswith
359 prefix = req.startswith
360 if prefix('revlog-compression-') or prefix('exp-compression-'):
360 if prefix('revlog-compression-') or prefix('exp-compression-'):
361 compression = req.split('-', 2)[2]
361 compression = req.split('-', 2)[2]
362 return compression
362 return compression
363
363
364 @classmethod
364 @classmethod
365 def fromconfig(cls, repo):
365 def fromconfig(cls, repo):
366 return repo.ui.config('format', 'revlog-compression')
366 return repo.ui.config('format', 'revlog-compression')
367
367
368 @registerformatvariant
368 @registerformatvariant
369 class compressionlevel(formatvariant):
369 class compressionlevel(formatvariant):
370 name = 'compression-level'
370 name = 'compression-level'
371 default = 'default'
371 default = 'default'
372
372
373 description = _('compression level')
373 description = _('compression level')
374
374
375 upgrademessage = _('revlog content will be recompressed')
375 upgrademessage = _('revlog content will be recompressed')
376
376
377 @classmethod
377 @classmethod
378 def fromrepo(cls, repo):
378 def fromrepo(cls, repo):
379 comp = compressionengine.fromrepo(repo)
379 comp = compressionengine.fromrepo(repo)
380 level = None
380 level = None
381 if comp == 'zlib':
381 if comp == 'zlib':
382 level = repo.ui.configint('storage', 'revlog.zlib.level')
382 level = repo.ui.configint('storage', 'revlog.zlib.level')
383 elif comp == 'zstd':
383 elif comp == 'zstd':
384 level = repo.ui.configint('storage', 'revlog.zstd.level')
384 level = repo.ui.configint('storage', 'revlog.zstd.level')
385 if level is None:
385 if level is None:
386 return 'default'
386 return 'default'
387 return bytes(level)
387 return bytes(level)
388
388
389 @classmethod
389 @classmethod
390 def fromconfig(cls, repo):
390 def fromconfig(cls, repo):
391 comp = compressionengine.fromconfig(repo)
391 comp = compressionengine.fromconfig(repo)
392 level = None
392 level = None
393 if comp == 'zlib':
393 if comp == 'zlib':
394 level = repo.ui.configint('storage', 'revlog.zlib.level')
394 level = repo.ui.configint('storage', 'revlog.zlib.level')
395 elif comp == 'zstd':
395 elif comp == 'zstd':
396 level = repo.ui.configint('storage', 'revlog.zstd.level')
396 level = repo.ui.configint('storage', 'revlog.zstd.level')
397 if level is None:
397 if level is None:
398 return 'default'
398 return 'default'
399 return bytes(level)
399 return bytes(level)
400
400
401 def finddeficiencies(repo):
401 def finddeficiencies(repo):
402 """returns a list of deficiencies that the repo suffer from"""
402 """returns a list of deficiencies that the repo suffer from"""
403 deficiencies = []
403 deficiencies = []
404
404
405 # We could detect lack of revlogv1 and store here, but they were added
405 # We could detect lack of revlogv1 and store here, but they were added
406 # in 0.9.2 and we don't support upgrading repos without these
406 # in 0.9.2 and we don't support upgrading repos without these
407 # requirements, so let's not bother.
407 # requirements, so let's not bother.
408
408
409 for fv in allformatvariant:
409 for fv in allformatvariant:
410 if not fv.fromrepo(repo):
410 if not fv.fromrepo(repo):
411 deficiencies.append(fv)
411 deficiencies.append(fv)
412
412
413 return deficiencies
413 return deficiencies
414
414
415 # search without '-' to support older forms on newer clients.
415 # search without '-' to support older forms on newer clients.
416 #
416 #
417 # We don't enforce backward compatibility for debug commands so this
417 # We don't enforce backward compatibility for debug commands so this
418 # might eventually be dropped. However, having to use two different
418 # might eventually be dropped. However, having to use two different
419 # forms in scripts when comparing results is annoying enough to add
419 # forms in scripts when comparing results is annoying enough to add
420 # backward compatibility for a while.
420 # backward compatibility for a while.
421 legacy_opts_map = {
421 legacy_opts_map = {
422 'redeltaparent': 're-delta-parent',
422 'redeltaparent': 're-delta-parent',
423 'redeltamultibase': 're-delta-multibase',
423 'redeltamultibase': 're-delta-multibase',
424 'redeltaall': 're-delta-all',
424 'redeltaall': 're-delta-all',
425 'redeltafulladd': 're-delta-fulladd',
425 'redeltafulladd': 're-delta-fulladd',
426 }
426 }
427
427
428 def findoptimizations(repo):
428 def findoptimizations(repo):
429 """Determine optimisation that could be used during upgrade"""
429 """Determine optimisation that could be used during upgrade"""
430 # These are unconditionally added. There is logic later that figures out
430 # These are unconditionally added. There is logic later that figures out
431 # which ones to apply.
431 # which ones to apply.
432 optimizations = []
432 optimizations = []
433
433
434 optimizations.append(improvement(
434 optimizations.append(improvement(
435 name='re-delta-parent',
435 name='re-delta-parent',
436 type=optimisation,
436 type=optimisation,
437 description=_('deltas within internal storage will be recalculated to '
437 description=_('deltas within internal storage will be recalculated to '
438 'choose an optimal base revision where this was not '
438 'choose an optimal base revision where this was not '
439 'already done; the size of the repository may shrink and '
439 'already done; the size of the repository may shrink and '
440 'various operations may become faster; the first time '
440 'various operations may become faster; the first time '
441 'this optimization is performed could slow down upgrade '
441 'this optimization is performed could slow down upgrade '
442 'execution considerably; subsequent invocations should '
442 'execution considerably; subsequent invocations should '
443 'not run noticeably slower'),
443 'not run noticeably slower'),
444 upgrademessage=_('deltas within internal storage will choose a new '
444 upgrademessage=_('deltas within internal storage will choose a new '
445 'base revision if needed')))
445 'base revision if needed')))
446
446
447 optimizations.append(improvement(
447 optimizations.append(improvement(
448 name='re-delta-multibase',
448 name='re-delta-multibase',
449 type=optimisation,
449 type=optimisation,
450 description=_('deltas within internal storage will be recalculated '
450 description=_('deltas within internal storage will be recalculated '
451 'against multiple base revision and the smallest '
451 'against multiple base revision and the smallest '
452 'difference will be used; the size of the repository may '
452 'difference will be used; the size of the repository may '
453 'shrink significantly when there are many merges; this '
453 'shrink significantly when there are many merges; this '
454 'optimization will slow down execution in proportion to '
454 'optimization will slow down execution in proportion to '
455 'the number of merges in the repository and the number '
455 'the number of merges in the repository and the number '
456 'of files in the repository; this slowdown should not '
456 'of files in the repository; this slowdown should not '
457 'be significant unless there are tens of thousands of '
457 'be significant unless there are tens of thousands of '
458 'files and thousands of merges'),
458 'files and thousands of merges'),
459 upgrademessage=_('deltas within internal storage will choose an '
459 upgrademessage=_('deltas within internal storage will choose an '
460 'optimal delta by computing deltas against multiple '
460 'optimal delta by computing deltas against multiple '
461 'parents; may slow down execution time '
461 'parents; may slow down execution time '
462 'significantly')))
462 'significantly')))
463
463
464 optimizations.append(improvement(
464 optimizations.append(improvement(
465 name='re-delta-all',
465 name='re-delta-all',
466 type=optimisation,
466 type=optimisation,
467 description=_('deltas within internal storage will always be '
467 description=_('deltas within internal storage will always be '
468 'recalculated without reusing prior deltas; this will '
468 'recalculated without reusing prior deltas; this will '
469 'likely make execution run several times slower; this '
469 'likely make execution run several times slower; this '
470 'optimization is typically not needed'),
470 'optimization is typically not needed'),
471 upgrademessage=_('deltas within internal storage will be fully '
471 upgrademessage=_('deltas within internal storage will be fully '
472 'recomputed; this will likely drastically slow down '
472 'recomputed; this will likely drastically slow down '
473 'execution time')))
473 'execution time')))
474
474
475 optimizations.append(improvement(
475 optimizations.append(improvement(
476 name='re-delta-fulladd',
476 name='re-delta-fulladd',
477 type=optimisation,
477 type=optimisation,
478 description=_('every revision will be re-added as if it was new '
478 description=_('every revision will be re-added as if it was new '
479 'content. It will go through the full storage '
479 'content. It will go through the full storage '
480 'mechanism giving extensions a chance to process it '
480 'mechanism giving extensions a chance to process it '
481 '(eg. lfs). This is similar to "re-delta-all" but even '
481 '(eg. lfs). This is similar to "re-delta-all" but even '
482 'slower since more logic is involved.'),
482 'slower since more logic is involved.'),
483 upgrademessage=_('each revision will be added as new content to the '
483 upgrademessage=_('each revision will be added as new content to the '
484 'internal storage; this will likely drastically slow '
484 'internal storage; this will likely drastically slow '
485 'down execution time, but some extensions might need '
485 'down execution time, but some extensions might need '
486 'it')))
486 'it')))
487
487
488 return optimizations
488 return optimizations
489
489
490 def determineactions(repo, deficiencies, sourcereqs, destreqs):
490 def determineactions(repo, deficiencies, sourcereqs, destreqs):
491 """Determine upgrade actions that will be performed.
491 """Determine upgrade actions that will be performed.
492
492
493 Given a list of improvements as returned by ``finddeficiencies`` and
493 Given a list of improvements as returned by ``finddeficiencies`` and
494 ``findoptimizations``, determine the list of upgrade actions that
494 ``findoptimizations``, determine the list of upgrade actions that
495 will be performed.
495 will be performed.
496
496
497 The role of this function is to filter improvements if needed, apply
497 The role of this function is to filter improvements if needed, apply
498 recommended optimizations from the improvements list that make sense,
498 recommended optimizations from the improvements list that make sense,
499 etc.
499 etc.
500
500
501 Returns a list of action names.
501 Returns a list of action names.
502 """
502 """
503 newactions = []
503 newactions = []
504
504
505 knownreqs = supporteddestrequirements(repo)
505 knownreqs = supporteddestrequirements(repo)
506
506
507 for d in deficiencies:
507 for d in deficiencies:
508 name = d.name
508 name = d.name
509
509
510 # If the action is a requirement that doesn't show up in the
510 # If the action is a requirement that doesn't show up in the
511 # destination requirements, prune the action.
511 # destination requirements, prune the action.
512 if name in knownreqs and name not in destreqs:
512 if name in knownreqs and name not in destreqs:
513 continue
513 continue
514
514
515 newactions.append(d)
515 newactions.append(d)
516
516
517 # FUTURE consider adding some optimizations here for certain transitions.
517 # FUTURE consider adding some optimizations here for certain transitions.
518 # e.g. adding generaldelta could schedule parent redeltas.
518 # e.g. adding generaldelta could schedule parent redeltas.
519
519
520 return newactions
520 return newactions
521
521
522 def _revlogfrompath(repo, path):
522 def _revlogfrompath(repo, path):
523 """Obtain a revlog from a repo path.
523 """Obtain a revlog from a repo path.
524
524
525 An instance of the appropriate class is returned.
525 An instance of the appropriate class is returned.
526 """
526 """
527 if path == '00changelog.i':
527 if path == '00changelog.i':
528 return changelog.changelog(repo.svfs)
528 return changelog.changelog(repo.svfs)
529 elif path.endswith('00manifest.i'):
529 elif path.endswith('00manifest.i'):
530 mandir = path[:-len('00manifest.i')]
530 mandir = path[:-len('00manifest.i')]
531 return manifest.manifestrevlog(repo.svfs, tree=mandir)
531 return manifest.manifestrevlog(repo.svfs, tree=mandir)
532 else:
532 else:
533 #reverse of "/".join(("data", path + ".i"))
533 #reverse of "/".join(("data", path + ".i"))
534 return filelog.filelog(repo.svfs, path[5:-2])
534 return filelog.filelog(repo.svfs, path[5:-2])
535
535
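# Illustration (not part of the original file): hypothetical store paths and
# the objects the mapping in _revlogfrompath above would resolve them to:
#   '00changelog.i'    -> changelog.changelog(repo.svfs)
#   'xx/00manifest.i'  -> manifest.manifestrevlog(repo.svfs, tree='xx/')
#   'data/foo.txt.i'   -> filelog.filelog(repo.svfs, 'foo.txt')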
536 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
537 """copy all relevant files for `oldrl` into `destrepo` store
538
539 Files are copied "as is" without any transformation. The copy is performed
540 without extra checks. Callers are responsible for making sure the copied
541 content is compatible with the format of the destination repository.
542 """
543 oldrl = getattr(oldrl, '_revlog', oldrl)
544 newrl = _revlogfrompath(destrepo, unencodedname)
545 newrl = getattr(newrl, '_revlog', newrl)
546
547 oldvfs = oldrl.opener
548 newvfs = newrl.opener
549 oldindex = oldvfs.join(oldrl.indexfile)
550 newindex = newvfs.join(newrl.indexfile)
551 olddata = oldvfs.join(oldrl.datafile)
552 newdata = newvfs.join(newrl.datafile)
553
554 newdir = newvfs.dirname(newrl.indexfile)
555 newvfs.makedirs(newdir)
556
557 util.copyfile(oldindex, newindex)
558 if oldrl.opener.exists(olddata):
559 util.copyfile(olddata, newdata)
560
561 if not (unencodedname.endswith('00changelog.i')
562 or unencodedname.endswith('00manifest.i')):
563 destrepo.svfs.fncache.add(unencodedname)
564
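# A minimal, hypothetical sketch (not part of this changeset) of how the new
# _copyrevlog helper could be driven to copy every revlog in the source store
# verbatim into the destination. _copyrevlog and _revlogfrompath are defined
# above; the wrapper function and its name are assumptions for illustration.
def _copyallrevlogs(tr, srcrepo, dstrepo):
    for unencoded, encoded, size in srcrepo.store.walk():
        # '.d' data files are copied together with their '.i' index
        if unencoded.endswith('.d'):
            continue
        oldrl = _revlogfrompath(srcrepo, unencoded)
        _copyrevlog(tr, dstrepo, oldrl, unencoded)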
536 def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
565 def _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
537 """Copy revlogs between 2 repos."""
566 """Copy revlogs between 2 repos."""
538 revcount = 0
567 revcount = 0
539 srcsize = 0
568 srcsize = 0
540 srcrawsize = 0
569 srcrawsize = 0
541 dstsize = 0
570 dstsize = 0
542 fcount = 0
571 fcount = 0
543 frevcount = 0
572 frevcount = 0
544 fsrcsize = 0
573 fsrcsize = 0
545 frawsize = 0
574 frawsize = 0
546 fdstsize = 0
575 fdstsize = 0
547 mcount = 0
576 mcount = 0
548 mrevcount = 0
577 mrevcount = 0
549 msrcsize = 0
578 msrcsize = 0
550 mrawsize = 0
579 mrawsize = 0
551 mdstsize = 0
580 mdstsize = 0
552 crevcount = 0
581 crevcount = 0
553 csrcsize = 0
582 csrcsize = 0
554 crawsize = 0
583 crawsize = 0
555 cdstsize = 0
584 cdstsize = 0
556
585
557 alldatafiles = list(srcrepo.store.walk())
586 alldatafiles = list(srcrepo.store.walk())
558
587
559 # Perform a pass to collect metadata. This validates we can open all
588 # Perform a pass to collect metadata. This validates we can open all
560 # source files and allows a unified progress bar to be displayed.
589 # source files and allows a unified progress bar to be displayed.
561 for unencoded, encoded, size in alldatafiles:
590 for unencoded, encoded, size in alldatafiles:
562 if unencoded.endswith('.d'):
591 if unencoded.endswith('.d'):
563 continue
592 continue
564
593
565 rl = _revlogfrompath(srcrepo, unencoded)
594 rl = _revlogfrompath(srcrepo, unencoded)
566
595
567 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
596 info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
568 trackedsize=True, storedsize=True)
597 trackedsize=True, storedsize=True)
569
598
570 revcount += info['revisionscount'] or 0
599 revcount += info['revisionscount'] or 0
571 datasize = info['storedsize'] or 0
600 datasize = info['storedsize'] or 0
572 rawsize = info['trackedsize'] or 0
601 rawsize = info['trackedsize'] or 0
573
602
574 srcsize += datasize
603 srcsize += datasize
575 srcrawsize += rawsize
604 srcrawsize += rawsize
576
605
577 # This is for the separate progress bars.
606 # This is for the separate progress bars.
578 if isinstance(rl, changelog.changelog):
607 if isinstance(rl, changelog.changelog):
579 crevcount += len(rl)
608 crevcount += len(rl)
580 csrcsize += datasize
609 csrcsize += datasize
581 crawsize += rawsize
610 crawsize += rawsize
582 elif isinstance(rl, manifest.manifestrevlog):
611 elif isinstance(rl, manifest.manifestrevlog):
583 mcount += 1
612 mcount += 1
584 mrevcount += len(rl)
613 mrevcount += len(rl)
585 msrcsize += datasize
614 msrcsize += datasize
586 mrawsize += rawsize
615 mrawsize += rawsize
587 elif isinstance(rl, filelog.filelog):
616 elif isinstance(rl, filelog.filelog):
588 fcount += 1
617 fcount += 1
589 frevcount += len(rl)
618 frevcount += len(rl)
590 fsrcsize += datasize
619 fsrcsize += datasize
591 frawsize += rawsize
620 frawsize += rawsize
592 else:
621 else:
593 raise error.ProgrammingError('unknown revlog type')
622 raise error.ProgrammingError('unknown revlog type')
594
623
595 if not revcount:
624 if not revcount:
596 return
625 return
597
626
598 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
627 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
599 '%d in changelog)\n') %
628 '%d in changelog)\n') %
600 (revcount, frevcount, mrevcount, crevcount))
629 (revcount, frevcount, mrevcount, crevcount))
601 ui.write(_('migrating %s in store; %s tracked data\n') % (
630 ui.write(_('migrating %s in store; %s tracked data\n') % (
602 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
631 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
603
632
604 # Used to keep track of progress.
633 # Used to keep track of progress.
605 progress = None
634 progress = None
606 def oncopiedrevision(rl, rev, node):
635 def oncopiedrevision(rl, rev, node):
607 progress.increment()
636 progress.increment()
608
637
609 # Do the actual copying.
638 # Do the actual copying.
610 # FUTURE this operation can be farmed off to worker processes.
639 # FUTURE this operation can be farmed off to worker processes.
611 seen = set()
640 seen = set()
612 for unencoded, encoded, size in alldatafiles:
641 for unencoded, encoded, size in alldatafiles:
613 if unencoded.endswith('.d'):
642 if unencoded.endswith('.d'):
614 continue
643 continue
615
644
616 oldrl = _revlogfrompath(srcrepo, unencoded)
645 oldrl = _revlogfrompath(srcrepo, unencoded)
617 newrl = _revlogfrompath(dstrepo, unencoded)
646 newrl = _revlogfrompath(dstrepo, unencoded)
618
647
619 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
648 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
620 ui.write(_('finished migrating %d manifest revisions across %d '
649 ui.write(_('finished migrating %d manifest revisions across %d '
621 'manifests; change in size: %s\n') %
650 'manifests; change in size: %s\n') %
622 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
651 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
623
652
624 ui.write(_('migrating changelog containing %d revisions '
653 ui.write(_('migrating changelog containing %d revisions '
625 '(%s in store; %s tracked data)\n') %
654 '(%s in store; %s tracked data)\n') %
626 (crevcount, util.bytecount(csrcsize),
655 (crevcount, util.bytecount(csrcsize),
627 util.bytecount(crawsize)))
656 util.bytecount(crawsize)))
628 seen.add('c')
657 seen.add('c')
629 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
658 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
630 total=crevcount)
659 total=crevcount)
631 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
660 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
632 ui.write(_('finished migrating %d filelog revisions across %d '
661 ui.write(_('finished migrating %d filelog revisions across %d '
633 'filelogs; change in size: %s\n') %
662 'filelogs; change in size: %s\n') %
634 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
663 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
635
664
636 ui.write(_('migrating %d manifests containing %d revisions '
665 ui.write(_('migrating %d manifests containing %d revisions '
637 '(%s in store; %s tracked data)\n') %
666 '(%s in store; %s tracked data)\n') %
638 (mcount, mrevcount, util.bytecount(msrcsize),
667 (mcount, mrevcount, util.bytecount(msrcsize),
639 util.bytecount(mrawsize)))
668 util.bytecount(mrawsize)))
640 seen.add('m')
669 seen.add('m')
641 if progress:
670 if progress:
642 progress.complete()
671 progress.complete()
643 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
672 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
644 total=mrevcount)
673 total=mrevcount)
645 elif 'f' not in seen:
674 elif 'f' not in seen:
646 ui.write(_('migrating %d filelogs containing %d revisions '
675 ui.write(_('migrating %d filelogs containing %d revisions '
647 '(%s in store; %s tracked data)\n') %
676 '(%s in store; %s tracked data)\n') %
648 (fcount, frevcount, util.bytecount(fsrcsize),
677 (fcount, frevcount, util.bytecount(fsrcsize),
649 util.bytecount(frawsize)))
678 util.bytecount(frawsize)))
650 seen.add('f')
679 seen.add('f')
651 if progress:
680 if progress:
652 progress.complete()
681 progress.complete()
653 progress = srcrepo.ui.makeprogress(_('file revisions'),
682 progress = srcrepo.ui.makeprogress(_('file revisions'),
654 total=frevcount)
683 total=frevcount)
655
684
656
685
657 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
686 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
658 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
687 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
659 deltareuse=deltareuse,
688 deltareuse=deltareuse,
660 forcedeltabothparents=forcedeltabothparents)
689 forcedeltabothparents=forcedeltabothparents)
661
690
662 info = newrl.storageinfo(storedsize=True)
691 info = newrl.storageinfo(storedsize=True)
663 datasize = info['storedsize'] or 0
692 datasize = info['storedsize'] or 0
664
693
665 dstsize += datasize
694 dstsize += datasize
666
695
667 if isinstance(newrl, changelog.changelog):
696 if isinstance(newrl, changelog.changelog):
668 cdstsize += datasize
697 cdstsize += datasize
669 elif isinstance(newrl, manifest.manifestrevlog):
698 elif isinstance(newrl, manifest.manifestrevlog):
670 mdstsize += datasize
699 mdstsize += datasize
671 else:
700 else:
672 fdstsize += datasize
701 fdstsize += datasize
673
702
674 progress.complete()
703 progress.complete()
675
704
676 ui.write(_('finished migrating %d changelog revisions; change in size: '
705 ui.write(_('finished migrating %d changelog revisions; change in size: '
677 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
706 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
678
707
679 ui.write(_('finished migrating %d total revisions; total change in store '
708 ui.write(_('finished migrating %d total revisions; total change in store '
680 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
709 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
681
710
682 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
711 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
683 """Determine whether to copy a store file during upgrade.
712 """Determine whether to copy a store file during upgrade.
684
713
685 This function is called when migrating store files from ``srcrepo`` to
714 This function is called when migrating store files from ``srcrepo`` to
686 ``dstrepo`` as part of upgrading a repository.
715 ``dstrepo`` as part of upgrading a repository.
687
716
688 Args:
717 Args:
689 srcrepo: repo we are copying from
718 srcrepo: repo we are copying from
690 dstrepo: repo we are copying to
719 dstrepo: repo we are copying to
691 requirements: set of requirements for ``dstrepo``
720 requirements: set of requirements for ``dstrepo``
692 path: store file being examined
721 path: store file being examined
693 mode: the ``ST_MODE`` file type of ``path``
722 mode: the ``ST_MODE`` file type of ``path``
694 st: ``stat`` data structure for ``path``
723 st: ``stat`` data structure for ``path``
695
724
696 Function should return ``True`` if the file is to be copied.
725 Function should return ``True`` if the file is to be copied.
697 """
726 """
698 # Skip revlogs.
727 # Skip revlogs.
699 if path.endswith(('.i', '.d')):
728 if path.endswith(('.i', '.d')):
700 return False
729 return False
701 # Skip transaction related files.
730 # Skip transaction related files.
702 if path.startswith('undo'):
731 if path.startswith('undo'):
703 return False
732 return False
704 # Only copy regular files.
733 # Only copy regular files.
705 if mode != stat.S_IFREG:
734 if mode != stat.S_IFREG:
706 return False
735 return False
707 # Skip other skipped files.
736 # Skip other skipped files.
708 if path in ('lock', 'fncache'):
737 if path in ('lock', 'fncache'):
709 return False
738 return False
710
739
711 return True
740 return True
712
741
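# Illustration (hypothetical paths, not part of the original file) of what
# _filterstorefile returns for a few store entries, following the checks above:
#   '00changelog.i'    -> False  (revlogs are migrated by _clonerevlogs)
#   'undo.backupfiles' -> False  (transaction-related)
#   'lock', 'fncache'  -> False  (explicitly skipped)
#   'phaseroots'       -> True   (plain regular store file, copied as-is)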
713 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
742 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
714 """Hook point for extensions to perform additional actions during upgrade.
743 """Hook point for extensions to perform additional actions during upgrade.
715
744
716 This function is called after revlogs and store files have been copied but
745 This function is called after revlogs and store files have been copied but
717 before the new store is swapped into the original location.
746 before the new store is swapped into the original location.
718 """
747 """
719
748
720 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
749 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
721 """Do the low-level work of upgrading a repository.
750 """Do the low-level work of upgrading a repository.
722
751
723 The upgrade is effectively performed as a copy between a source
752 The upgrade is effectively performed as a copy between a source
724 repository and a temporary destination repository.
753 repository and a temporary destination repository.
725
754
726 The source repository is unmodified for as long as possible so the
755 The source repository is unmodified for as long as possible so the
727 upgrade can abort at any time without causing loss of service for
756 upgrade can abort at any time without causing loss of service for
728 readers and without corrupting the source repository.
757 readers and without corrupting the source repository.
729 """
758 """
730 assert srcrepo.currentwlock()
759 assert srcrepo.currentwlock()
731 assert dstrepo.currentwlock()
760 assert dstrepo.currentwlock()
732
761
733 ui.write(_('(it is safe to interrupt this process any time before '
762 ui.write(_('(it is safe to interrupt this process any time before '
734 'data migration completes)\n'))
763 'data migration completes)\n'))
735
764
736 if 're-delta-all' in actions:
765 if 're-delta-all' in actions:
737 deltareuse = revlog.revlog.DELTAREUSENEVER
766 deltareuse = revlog.revlog.DELTAREUSENEVER
738 elif 're-delta-parent' in actions:
767 elif 're-delta-parent' in actions:
739 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
768 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
740 elif 're-delta-multibase' in actions:
769 elif 're-delta-multibase' in actions:
741 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
770 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
742 elif 're-delta-fulladd' in actions:
771 elif 're-delta-fulladd' in actions:
743 deltareuse = revlog.revlog.DELTAREUSEFULLADD
772 deltareuse = revlog.revlog.DELTAREUSEFULLADD
744 else:
773 else:
745 deltareuse = revlog.revlog.DELTAREUSEALWAYS
774 deltareuse = revlog.revlog.DELTAREUSEALWAYS
746
775
747 with dstrepo.transaction('upgrade') as tr:
776 with dstrepo.transaction('upgrade') as tr:
748 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
777 _clonerevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
749 're-delta-multibase' in actions)
778 're-delta-multibase' in actions)
750
779
751 # Now copy other files in the store directory.
780 # Now copy other files in the store directory.
752 # The sorted() makes execution deterministic.
781 # The sorted() makes execution deterministic.
753 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
782 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
754 if not _filterstorefile(srcrepo, dstrepo, requirements,
783 if not _filterstorefile(srcrepo, dstrepo, requirements,
755 p, kind, st):
784 p, kind, st):
756 continue
785 continue
757
786
758 srcrepo.ui.write(_('copying %s\n') % p)
787 srcrepo.ui.write(_('copying %s\n') % p)
759 src = srcrepo.store.rawvfs.join(p)
788 src = srcrepo.store.rawvfs.join(p)
760 dst = dstrepo.store.rawvfs.join(p)
789 dst = dstrepo.store.rawvfs.join(p)
761 util.copyfile(src, dst, copystat=True)
790 util.copyfile(src, dst, copystat=True)
762
791
763 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
792 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
764
793
765 ui.write(_('data fully migrated to temporary repository\n'))
794 ui.write(_('data fully migrated to temporary repository\n'))
766
795
767 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
796 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
768 backupvfs = vfsmod.vfs(backuppath)
797 backupvfs = vfsmod.vfs(backuppath)
769
798
770 # Make a backup of requires file first, as it is the first to be modified.
799 # Make a backup of requires file first, as it is the first to be modified.
771 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
800 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
772
801
773 # We install an arbitrary requirement that clients must not support
802 # We install an arbitrary requirement that clients must not support
774 # as a mechanism to lock out new clients during the data swap. This is
803 # as a mechanism to lock out new clients during the data swap. This is
775 # better than allowing a client to continue while the repository is in
804 # better than allowing a client to continue while the repository is in
776 # an inconsistent state.
805 # an inconsistent state.
777 ui.write(_('marking source repository as being upgraded; clients will be '
806 ui.write(_('marking source repository as being upgraded; clients will be '
778 'unable to read from repository\n'))
807 'unable to read from repository\n'))
779 scmutil.writerequires(srcrepo.vfs,
808 scmutil.writerequires(srcrepo.vfs,
780 srcrepo.requirements | {'upgradeinprogress'})
809 srcrepo.requirements | {'upgradeinprogress'})
781
810
782 ui.write(_('starting in-place swap of repository data\n'))
811 ui.write(_('starting in-place swap of repository data\n'))
783 ui.write(_('replaced files will be backed up at %s\n') %
812 ui.write(_('replaced files will be backed up at %s\n') %
784 backuppath)
813 backuppath)
785
814
786 # Now swap in the new store directory. Doing it as a rename should make
815 # Now swap in the new store directory. Doing it as a rename should make
787 # the operation nearly instantaneous and atomic (at least in well-behaved
816 # the operation nearly instantaneous and atomic (at least in well-behaved
788 # environments).
817 # environments).
789 ui.write(_('replacing store...\n'))
818 ui.write(_('replacing store...\n'))
790 tstart = util.timer()
819 tstart = util.timer()
791 util.rename(srcrepo.spath, backupvfs.join('store'))
820 util.rename(srcrepo.spath, backupvfs.join('store'))
792 util.rename(dstrepo.spath, srcrepo.spath)
821 util.rename(dstrepo.spath, srcrepo.spath)
793 elapsed = util.timer() - tstart
822 elapsed = util.timer() - tstart
794 ui.write(_('store replacement complete; repository was inconsistent for '
823 ui.write(_('store replacement complete; repository was inconsistent for '
795 '%0.1fs\n') % elapsed)
824 '%0.1fs\n') % elapsed)
796
825
797 # We first write the requirements file. Any new requirements will lock
826 # We first write the requirements file. Any new requirements will lock
798 # out legacy clients.
827 # out legacy clients.
799 ui.write(_('finalizing requirements file and making repository readable '
828 ui.write(_('finalizing requirements file and making repository readable '
800 'again\n'))
829 'again\n'))
801 scmutil.writerequires(srcrepo.vfs, requirements)
830 scmutil.writerequires(srcrepo.vfs, requirements)
802
831
803 # The lock file from the old store won't be removed because nothing has a
832 # The lock file from the old store won't be removed because nothing has a
804 # reference to its new location. So clean it up manually. Alternatively, we
833 # reference to its new location. So clean it up manually. Alternatively, we
805 # could update srcrepo.svfs and other variables to point to the new
834 # could update srcrepo.svfs and other variables to point to the new
806 # location. This is simpler.
835 # location. This is simpler.
807 backupvfs.unlink('store/lock')
836 backupvfs.unlink('store/lock')
808
837
809 return backuppath
838 return backuppath
810
839
811 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
840 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
812 """Upgrade a repository in place."""
841 """Upgrade a repository in place."""
813 if optimize is None:
842 if optimize is None:
814 optimize = []
843 optimize = []
815 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
844 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))
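
    # Illustrative example: a repository whose requirements file contains
    # {'revlogv1', 'store', 'shared'} passes the first check (nothing listed
    # by requiredsourcerequirements() is missing) but fails the second,
    # because 'shared' is one of the blocking requirements.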

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui))
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))
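
    # In other words: newreqs is what a freshly created repository would use
    # plus whatever must be preserved from the source, and the three checks
    # above refuse to silently drop a requirement, to add one the upgrade
    # code cannot introduce, or to target one the destination writer does
    # not support.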

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))
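
    # Illustrative example (optimization names assumed from the actions
    # registered by findoptimizations()): requesting 're-delta-parent' or
    # 're-delta-all' selects that optimization, while an unrecognized name
    # is left behind in `optimize` and triggers the Abort above.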

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_(' preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_(' removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_(' added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')
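
    # Illustrative output of printrequirements() (names depend on the repo):
    #
    #   requirements
    #      preserved: dotencode, fncache, revlogv1, store
    #      added: generaldelta, sparserevlog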

    def printupgradeactions():
        for a in actions:
            ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n %s\n\n') % (i.name, i.description))
        return
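
    # Editorial note: everything above handles the dry-run case; without
    # `run` the function only reports what an upgrade would change and
    # returns before any lock is taken or any data is touched.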

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
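        # Descriptive note: repo.path is the .hg directory, so the staging
        # repository is created at a path of the form .hg/upgrade.XXXXXX
        # (suffix generated by mkdtemp).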
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions)
            if not (backup or backuppath is None):
                ui.write(_('removing old repository content%s\n') % backuppath)
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None
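                # Clearing backuppath here skips the "backed up at ..."
                # warning below when the caller asked for no backup and the
                # backup directory has already been removed.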

        finally:
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

        if backuppath:
            ui.warn(_('copy of old repository backed up at %s\n') %
                    backuppath)
            ui.warn(_('the old repository will not be deleted; remove '
                      'it to free up disk space once the upgraded '
                      'repository is verified\n'))
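
A minimal usage sketch (editorial addition, not part of this changeset): the
snippet below drives the function above from Python, assuming `ui` and `repo`
objects are already available (for example inside an extension); the
optimization name 're-delta-parent' is only an assumed example.

    from mercurial import upgrade

    def runupgrade(ui, repo):
        # Report the plan first (dry run), then perform the upgrade while
        # keeping a backup of the old store for manual verification.
        upgrade.upgraderepo(ui, repo, run=False)
        upgrade.upgraderepo(ui, repo, run=True,
                            optimize=[b're-delta-parent'], backup=True)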