##// END OF EJS Templates
upgrade: walk the source store file only once...
marmoute -
r42916:896fb9de default
parent child Browse files
Show More
@@ -1,980 +1,982 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
27 from .utils import (
27 from .utils import (
28 compression,
28 compression,
29 )
29 )
30
30
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2; repositories
    # lacking them are too old to upgrade in place.
    return {'revlogv1', 'store'}
43
43
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blocking = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }
    return blocking
60
60
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Requirements tied to optional compression engines may also be dropped,
    # provided the engine is usable on this system.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
78
78
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Requirements for any usable optional compression engine are also
    # acceptable in the destination.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
102
102
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        localrepo.SPARSEREVLOG_REQUIREMENT,
    }
    # Requirements for any usable optional compression engine may also be
    # introduced by the upgrade.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == 'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
126
126
def preservedrequirements(repo):
    """Obtain requirements unconditionally carried over during an upgrade.

    Currently always empty; presumably a hook point for extensions --
    nothing is preserved by default.
    """
    return set()
129
129
# Labels for the two kinds of ``improvement``. Note the American spelling
# of the 'optimization' value vs. the British spelling of the variable name.
deficiency = 'deficiency'
optimisation = 'optimization'
132
132
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # Returning NotImplemented defers the comparison to the other
            # operand, as the Python data model recommends.
            return NotImplemented
        # Identity of an improvement is its machine-readable name only.
        return self.name == other.name

    def __ne__(self, other):
        # Derived from __eq__ so the two always agree.
        return not (self == other)

    def __hash__(self):
        # Hash must be consistent with __eq__, which compares by name.
        return hash(self.name)
174
174
# Registry of every known format variant class, populated by the
# ``registerformatvariant`` decorator below.
allformatvariant = []

def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
180
180
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    # Format variants are always reported as deficiencies when absent.
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # Format variants are used as classes, never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
214
214
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would be created with; used as
        # the reference point for ``fromconfig``.
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui))

    @classmethod
    def fromrepo(cls, repo):
        """True if the repository currently has the requirement."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """True if a newly-created repository would get the requirement."""
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
239
239
@registerformatvariant
class fncache(requirementformatvariant):
    # Tracks the 'fncache' requirement; on by default for new repositories.
    name = 'fncache'

    _requirement = 'fncache'

    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
254
254
@registerformatvariant
class dotencode(requirementformatvariant):
    # Tracks the 'dotencode' requirement; on by default for new repositories.
    name = 'dotencode'

    _requirement = 'dotencode'

    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
268
268
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Tracks the 'generaldelta' requirement; on by default for new
    # repositories.
    name = 'generaldelta'

    _requirement = 'generaldelta'

    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
290
290
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Tracks the sparse-revlog requirement (constant owned by localrepo);
    # on by default for new repositories.
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _('in order to limit disk reading and memory usage on older '
                    'version, the span of a delta chain from its root to its '
                    'end is limited, whatever the relevant data in this span. '
                    'This can severly limit Mercurial ability to build good '
                    'chain of delta resulting is much more storage space being '
                    'taken and limit reusability of on disk delta during '
                    'exchange.'
                    )

    upgrademessage = _('Revlog supports delta chain with more unused data '
                       'between payload. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making a '
                       'better compression and faster exchange with server.')
312
312
@registerformatvariant
class removecldeltachain(formatvariant):
    # Not requirement-based: detected by scanning the changelog itself.
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformated to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        # True only when every revision is its own chain base, i.e. the
        # changelog stores full snapshots and no deltas remain.
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # New repositories never use changelog delta chains.
        return True
339
339
@registerformatvariant
class compressionengine(formatvariant):
    # Reports the compression engine in use, derived from the requirements.
    name = 'compression'
    default = 'zlib'

    description = _('Compresion algorithm used to compress data. '
                    'Some engine are faster than other')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        compression = 'zlib'
        for req in repo.requirements:
            prefix = req.startswith
            if prefix('revlog-compression-') or prefix('exp-compression-'):
                # Requirement is '<prefix>-compression-<engine>'; keep the
                # engine part (split with maxsplit=2 leaves it intact).
                compression = req.split('-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        # The configured engine for new repositories.
        return repo.ui.config('format', 'revlog-compression')
367
367
@registerformatvariant
class compressionlevel(formatvariant):
    # Reports the configured compression level for the active engine.
    name = 'compression-level'
    default = 'default'

    description = _('compression level')

    upgrademessage = _('revlog content will be recompressed')

    @classmethod
    def _level(cls, repo, comp):
        # Shared helper for fromrepo/fromconfig: read the configured level
        # for compression engine ``comp``, as a bytes string, or 'default'
        # when nothing (or an unknown engine) is configured.
        level = None
        if comp == 'zlib':
            level = repo.ui.configint('storage', 'revlog.zlib.level')
        elif comp == 'zstd':
            level = repo.ui.configint('storage', 'revlog.zstd.level')
        if level is None:
            return 'default'
        # b'%d' formats the decimal representation; the previous
        # ``bytes(level)`` would build a buffer of ``level`` NUL bytes on
        # Python 3 instead of the digit string.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        """Compression level currently in effect for the repository."""
        return cls._level(repo, compressionengine.fromrepo(repo))

    @classmethod
    def fromconfig(cls, repo):
        """Compression level a new repository would be created with."""
        return cls._level(repo, compressionengine.fromconfig(repo))
400
400
def finddeficiencies(repo):
    """Return the list of format deficiencies the repository suffers from."""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
414
414
# Mapping of legacy optimization names (without '-') to their modern
# equivalents, so older forms keep working on newer clients.
#
# We don't enforce backward compatibility for debug command so this
# might eventually be dropped. However, having to use two different
# forms in script when comparing result is annoying enough to add
# backward compatibility for a while.
legacy_opts_map = {
    'redeltaparent': 're-delta-parent',
    'redeltamultibase': 're-delta-multibase',
    'redeltaall': 're-delta-all',
    'redeltafulladd': 're-delta-fulladd',
}
427
427
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade.

    Returns a list of ``improvement`` instances of type ``optimisation``.
    """
    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.
    optimizations = []

    optimizations.append(improvement(
        name='re-delta-parent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    optimizations.append(improvement(
        name='re-delta-multibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    optimizations.append(improvement(
        name='re-delta-all',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    optimizations.append(improvement(
        name='re-delta-fulladd',
        type=optimisation,
        description=_('every revision will be re-added as if it was new '
                      'content. It will go through the full storage '
                      'mechanism giving extensions a chance to process it '
                      '(eg. lfs). This is similar to "re-delta-all" but even '
                      'slower since more logic is involved.'),
        upgrademessage=_('each revision will be added as new content to the '
                         'internal storage; this will likely drastically slow '
                         'down execution time, but some extensions might need '
                         'it')))

    return optimizations
489
489
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, decide which of them apply to the transition
    from ``sourcereqs`` to ``destreqs``. Improvements may be filtered out
    here if they do not make sense for the destination repository.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # A deficiency whose name is a known requirement that the destination
    # repository will NOT carry is pruned; every other deficiency is kept.
    newactions = [d for d in deficiencies
                  if d.name not in knownreqs or d.name in destreqs]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
521
521
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        # Everything before the "00manifest.i" suffix is the tree prefix
        # (empty string for the root manifest).
        tree = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # Reverse of "/".join(("data", path + ".i")): drop the leading "data/"
    # prefix and the trailing ".i" suffix to recover the tracked file path.
    return filelog.filelog(repo.svfs, path[5:-2])
535
535
def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
    """Copy revlogs between 2 repos.

    Walks the source store once up front, then performs two passes over the
    collected file list: a metadata pass (to validate every revlog can be
    opened and to size the progress bars) and the actual copy pass.

    ``deltareuse`` is one of the ``revlog.revlog.DELTAREUSE*`` constants and
    controls how deltas are recomputed during the clone;
    ``forcedeltabothparents`` forces delta computation against both parents.

    Progress and summary messages are written to ``ui``. Returns ``None``.
    """
    # Aggregate counters, plus per-category counters for changelog (c*),
    # manifests (m*) and filelogs (f*).
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Walk the source store file list only once and reuse it for both
    # passes below (walking can be expensive on large repositories).
    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        # ".d" files hold revlog data referenced from the ".i" index; the
        # index entry is what we operate on, so skip the data files.
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(exclusivefiles=True, revisionscount=True,
                              trackedsize=True, storedsize=True)

        revcount += info['revisionscount'] or 0
        datasize = info['storedsize'] or 0
        rawsize = info['trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # BUG FIX: the exception was previously instantiated but never
            # raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError('unknown revlog type')

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = None
    def oncopiedrevision(rl, rev, node):
        # Closure reads ``progress`` at call time, so it always ticks the
        # progress bar of the category currently being copied.
        progress.increment()

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        # store.walk() yields filelogs, then manifests, then the changelog;
        # the first item of each category triggers the summary of the
        # previous category and a fresh progress bar.
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress = srcrepo.ui.makeprogress(_('changelog revisions'),
                                               total=crevcount)
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('manifest revisions'),
                                               total=mrevcount)
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(_('file revisions'),
                                               total=frevcount)


        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    forcedeltabothparents=forcedeltabothparents)

        info = newrl.storageinfo(storedsize=True)
        datasize = info['storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
679
681
680 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
682 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
681 """Determine whether to copy a store file during upgrade.
683 """Determine whether to copy a store file during upgrade.
682
684
683 This function is called when migrating store files from ``srcrepo`` to
685 This function is called when migrating store files from ``srcrepo`` to
684 ``dstrepo`` as part of upgrading a repository.
686 ``dstrepo`` as part of upgrading a repository.
685
687
686 Args:
688 Args:
687 srcrepo: repo we are copying from
689 srcrepo: repo we are copying from
688 dstrepo: repo we are copying to
690 dstrepo: repo we are copying to
689 requirements: set of requirements for ``dstrepo``
691 requirements: set of requirements for ``dstrepo``
690 path: store file being examined
692 path: store file being examined
691 mode: the ``ST_MODE`` file type of ``path``
693 mode: the ``ST_MODE`` file type of ``path``
692 st: ``stat`` data structure for ``path``
694 st: ``stat`` data structure for ``path``
693
695
694 Function should return ``True`` if the file is to be copied.
696 Function should return ``True`` if the file is to be copied.
695 """
697 """
696 # Skip revlogs.
698 # Skip revlogs.
697 if path.endswith(('.i', '.d')):
699 if path.endswith(('.i', '.d')):
698 return False
700 return False
699 # Skip transaction related files.
701 # Skip transaction related files.
700 if path.startswith('undo'):
702 if path.startswith('undo'):
701 return False
703 return False
702 # Only copy regular files.
704 # Only copy regular files.
703 if mode != stat.S_IFREG:
705 if mode != stat.S_IFREG:
704 return False
706 return False
705 # Skip other skipped files.
707 # Skip other skipped files.
706 if path in ('lock', 'fncache'):
708 if path in ('lock', 'fncache'):
707 return False
709 return False
708
710
709 return True
711 return True
710
712
711 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
713 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
712 """Hook point for extensions to perform additional actions during upgrade.
714 """Hook point for extensions to perform additional actions during upgrade.
713
715
714 This function is called after revlogs and store files have been copied but
716 This function is called after revlogs and store files have been copied but
715 before the new store is swapped into the original location.
717 before the new store is swapped into the original location.
716 """
718 """
717
719
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    ``requirements`` is the set of requirements the upgraded repository
    will carry; ``actions`` is the set of action names selected by the
    caller (e.g. ``'re-delta-all'``).

    Returns the path of the backup directory holding the replaced store
    and requires file.
    """
    # Both repos must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Map the requested re-delta action to a revlog delta-reuse policy.
    # Order matters: the most aggressive recomputation wins.
    if 're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    # All writes to the destination happen inside one transaction so an
    # interrupted migration leaves no partial destination state behind.
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     're-delta-multibase' in actions)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | {'upgradeinprogress'})

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
808
810
809 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
811 def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
810 """Upgrade a repository in place."""
812 """Upgrade a repository in place."""
811 if optimize is None:
813 if optimize is None:
812 optimize = []
814 optimize = []
813 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
815 optimize = set(legacy_opts_map.get(o, o) for o in optimize)
814 repo = repo.unfiltered()
816 repo = repo.unfiltered()
815
817
816 # Ensure the repository can be upgraded.
818 # Ensure the repository can be upgraded.
817 missingreqs = requiredsourcerequirements(repo) - repo.requirements
819 missingreqs = requiredsourcerequirements(repo) - repo.requirements
818 if missingreqs:
820 if missingreqs:
819 raise error.Abort(_('cannot upgrade repository; requirement '
821 raise error.Abort(_('cannot upgrade repository; requirement '
820 'missing: %s') % _(', ').join(sorted(missingreqs)))
822 'missing: %s') % _(', ').join(sorted(missingreqs)))
821
823
822 blockedreqs = blocksourcerequirements(repo) & repo.requirements
824 blockedreqs = blocksourcerequirements(repo) & repo.requirements
823 if blockedreqs:
825 if blockedreqs:
824 raise error.Abort(_('cannot upgrade repository; unsupported source '
826 raise error.Abort(_('cannot upgrade repository; unsupported source '
825 'requirement: %s') %
827 'requirement: %s') %
826 _(', ').join(sorted(blockedreqs)))
828 _(', ').join(sorted(blockedreqs)))
827
829
828 # FUTURE there is potentially a need to control the wanted requirements via
830 # FUTURE there is potentially a need to control the wanted requirements via
829 # command arguments or via an extension hook point.
831 # command arguments or via an extension hook point.
830 newreqs = localrepo.newreporequirements(
832 newreqs = localrepo.newreporequirements(
831 repo.ui, localrepo.defaultcreateopts(repo.ui))
833 repo.ui, localrepo.defaultcreateopts(repo.ui))
832 newreqs.update(preservedrequirements(repo))
834 newreqs.update(preservedrequirements(repo))
833
835
834 noremovereqs = (repo.requirements - newreqs -
836 noremovereqs = (repo.requirements - newreqs -
835 supportremovedrequirements(repo))
837 supportremovedrequirements(repo))
836 if noremovereqs:
838 if noremovereqs:
837 raise error.Abort(_('cannot upgrade repository; requirement would be '
839 raise error.Abort(_('cannot upgrade repository; requirement would be '
838 'removed: %s') % _(', ').join(sorted(noremovereqs)))
840 'removed: %s') % _(', ').join(sorted(noremovereqs)))
839
841
840 noaddreqs = (newreqs - repo.requirements -
842 noaddreqs = (newreqs - repo.requirements -
841 allowednewrequirements(repo))
843 allowednewrequirements(repo))
842 if noaddreqs:
844 if noaddreqs:
843 raise error.Abort(_('cannot upgrade repository; do not support adding '
845 raise error.Abort(_('cannot upgrade repository; do not support adding '
844 'requirement: %s') %
846 'requirement: %s') %
845 _(', ').join(sorted(noaddreqs)))
847 _(', ').join(sorted(noaddreqs)))
846
848
847 unsupportedreqs = newreqs - supporteddestrequirements(repo)
849 unsupportedreqs = newreqs - supporteddestrequirements(repo)
848 if unsupportedreqs:
850 if unsupportedreqs:
849 raise error.Abort(_('cannot upgrade repository; do not support '
851 raise error.Abort(_('cannot upgrade repository; do not support '
850 'destination requirement: %s') %
852 'destination requirement: %s') %
851 _(', ').join(sorted(unsupportedreqs)))
853 _(', ').join(sorted(unsupportedreqs)))
852
854
853 # Find and validate all improvements that can be made.
855 # Find and validate all improvements that can be made.
854 alloptimizations = findoptimizations(repo)
856 alloptimizations = findoptimizations(repo)
855
857
856 # Apply and Validate arguments.
858 # Apply and Validate arguments.
857 optimizations = []
859 optimizations = []
858 for o in alloptimizations:
860 for o in alloptimizations:
859 if o.name in optimize:
861 if o.name in optimize:
860 optimizations.append(o)
862 optimizations.append(o)
861 optimize.discard(o.name)
863 optimize.discard(o.name)
862
864
863 if optimize: # anything left is unknown
865 if optimize: # anything left is unknown
864 raise error.Abort(_('unknown optimization action requested: %s') %
866 raise error.Abort(_('unknown optimization action requested: %s') %
865 ', '.join(sorted(optimize)),
867 ', '.join(sorted(optimize)),
866 hint=_('run without arguments to see valid '
868 hint=_('run without arguments to see valid '
867 'optimizations'))
869 'optimizations'))
868
870
869 deficiencies = finddeficiencies(repo)
871 deficiencies = finddeficiencies(repo)
870 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
872 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
871 actions.extend(o for o in sorted(optimizations)
873 actions.extend(o for o in sorted(optimizations)
872 # determineactions could have added optimisation
874 # determineactions could have added optimisation
873 if o not in actions)
875 if o not in actions)
874
876
875 def printrequirements():
877 def printrequirements():
876 ui.write(_('requirements\n'))
878 ui.write(_('requirements\n'))
877 ui.write(_(' preserved: %s\n') %
879 ui.write(_(' preserved: %s\n') %
878 _(', ').join(sorted(newreqs & repo.requirements)))
880 _(', ').join(sorted(newreqs & repo.requirements)))
879
881
880 if repo.requirements - newreqs:
882 if repo.requirements - newreqs:
881 ui.write(_(' removed: %s\n') %
883 ui.write(_(' removed: %s\n') %
882 _(', ').join(sorted(repo.requirements - newreqs)))
884 _(', ').join(sorted(repo.requirements - newreqs)))
883
885
884 if newreqs - repo.requirements:
886 if newreqs - repo.requirements:
885 ui.write(_(' added: %s\n') %
887 ui.write(_(' added: %s\n') %
886 _(', ').join(sorted(newreqs - repo.requirements)))
888 _(', ').join(sorted(newreqs - repo.requirements)))
887
889
888 ui.write('\n')
890 ui.write('\n')
889
891
890 def printupgradeactions():
892 def printupgradeactions():
891 for a in actions:
893 for a in actions:
892 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
894 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
893
895
894 if not run:
896 if not run:
895 fromconfig = []
897 fromconfig = []
896 onlydefault = []
898 onlydefault = []
897
899
898 for d in deficiencies:
900 for d in deficiencies:
899 if d.fromconfig(repo):
901 if d.fromconfig(repo):
900 fromconfig.append(d)
902 fromconfig.append(d)
901 elif d.default:
903 elif d.default:
902 onlydefault.append(d)
904 onlydefault.append(d)
903
905
904 if fromconfig or onlydefault:
906 if fromconfig or onlydefault:
905
907
906 if fromconfig:
908 if fromconfig:
907 ui.write(_('repository lacks features recommended by '
909 ui.write(_('repository lacks features recommended by '
908 'current config options:\n\n'))
910 'current config options:\n\n'))
909 for i in fromconfig:
911 for i in fromconfig:
910 ui.write('%s\n %s\n\n' % (i.name, i.description))
912 ui.write('%s\n %s\n\n' % (i.name, i.description))
911
913
912 if onlydefault:
914 if onlydefault:
913 ui.write(_('repository lacks features used by the default '
915 ui.write(_('repository lacks features used by the default '
914 'config options:\n\n'))
916 'config options:\n\n'))
915 for i in onlydefault:
917 for i in onlydefault:
916 ui.write('%s\n %s\n\n' % (i.name, i.description))
918 ui.write('%s\n %s\n\n' % (i.name, i.description))
917
919
918 ui.write('\n')
920 ui.write('\n')
919 else:
921 else:
920 ui.write(_('(no feature deficiencies found in existing '
922 ui.write(_('(no feature deficiencies found in existing '
921 'repository)\n'))
923 'repository)\n'))
922
924
923 ui.write(_('performing an upgrade with "--run" will make the following '
925 ui.write(_('performing an upgrade with "--run" will make the following '
924 'changes:\n\n'))
926 'changes:\n\n'))
925
927
926 printrequirements()
928 printrequirements()
927 printupgradeactions()
929 printupgradeactions()
928
930
929 unusedoptimize = [i for i in alloptimizations if i not in actions]
931 unusedoptimize = [i for i in alloptimizations if i not in actions]
930
932
931 if unusedoptimize:
933 if unusedoptimize:
932 ui.write(_('additional optimizations are available by specifying '
934 ui.write(_('additional optimizations are available by specifying '
933 '"--optimize <name>":\n\n'))
935 '"--optimize <name>":\n\n'))
934 for i in unusedoptimize:
936 for i in unusedoptimize:
935 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
937 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
936 return
938 return
937
939
938 # Else we're in the run=true case.
940 # Else we're in the run=true case.
939 ui.write(_('upgrade will perform the following actions:\n\n'))
941 ui.write(_('upgrade will perform the following actions:\n\n'))
940 printrequirements()
942 printrequirements()
941 printupgradeactions()
943 printupgradeactions()
942
944
943 upgradeactions = [a.name for a in actions]
945 upgradeactions = [a.name for a in actions]
944
946
945 ui.write(_('beginning upgrade...\n'))
947 ui.write(_('beginning upgrade...\n'))
946 with repo.wlock(), repo.lock():
948 with repo.wlock(), repo.lock():
947 ui.write(_('repository locked and read-only\n'))
949 ui.write(_('repository locked and read-only\n'))
948 # Our strategy for upgrading the repository is to create a new,
950 # Our strategy for upgrading the repository is to create a new,
949 # temporary repository, write data to it, then do a swap of the
951 # temporary repository, write data to it, then do a swap of the
950 # data. There are less heavyweight ways to do this, but it is easier
952 # data. There are less heavyweight ways to do this, but it is easier
951 # to create a new repo object than to instantiate all the components
953 # to create a new repo object than to instantiate all the components
952 # (like the store) separately.
954 # (like the store) separately.
953 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
955 tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
954 backuppath = None
956 backuppath = None
955 try:
957 try:
956 ui.write(_('creating temporary repository to stage migrated '
958 ui.write(_('creating temporary repository to stage migrated '
957 'data: %s\n') % tmppath)
959 'data: %s\n') % tmppath)
958
960
959 # clone ui without using ui.copy because repo.ui is protected
961 # clone ui without using ui.copy because repo.ui is protected
960 repoui = repo.ui.__class__(repo.ui)
962 repoui = repo.ui.__class__(repo.ui)
961 dstrepo = hg.repository(repoui, path=tmppath, create=True)
963 dstrepo = hg.repository(repoui, path=tmppath, create=True)
962
964
963 with dstrepo.wlock(), dstrepo.lock():
965 with dstrepo.wlock(), dstrepo.lock():
964 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
966 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
965 upgradeactions)
967 upgradeactions)
966 if not (backup or backuppath is None):
968 if not (backup or backuppath is None):
967 ui.write(_('removing old repository content%s\n') % backuppath)
969 ui.write(_('removing old repository content%s\n') % backuppath)
968 repo.vfs.rmtree(backuppath, forcibly=True)
970 repo.vfs.rmtree(backuppath, forcibly=True)
969 backuppath = None
971 backuppath = None
970
972
971 finally:
973 finally:
972 ui.write(_('removing temporary repository %s\n') % tmppath)
974 ui.write(_('removing temporary repository %s\n') % tmppath)
973 repo.vfs.rmtree(tmppath, forcibly=True)
975 repo.vfs.rmtree(tmppath, forcibly=True)
974
976
975 if backuppath:
977 if backuppath:
976 ui.warn(_('copy of old repository backed up at %s\n') %
978 ui.warn(_('copy of old repository backed up at %s\n') %
977 backuppath)
979 backuppath)
978 ui.warn(_('the old repository will not be deleted; remove '
980 ui.warn(_('the old repository will not be deleted; remove '
979 'it to free up disk space once the upgraded '
981 'it to free up disk space once the upgraded '
980 'repository is verified\n'))
982 'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now