##// END OF EJS Templates
upgrade: close progress after each revlog...
Martin von Zweigbergk -
r38417:f273b768 default
parent child Browse files
Show More
@@ -1,868 +1,872
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements date back to Mercurial 0.9.2; repositories
    # predating them cannot be upgraded.
    required = set()
    required.add('revlogv1')
    required.add('store')
    return required
39
39
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
        # The upgrade code does not yet support this experimental feature.
        # This is an artificial limitation.
        'treemanifest',
    }
    return blockers
56
56
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # No requirement may currently be dropped by an upgrade.
    removable = set()
    return removable
65
65
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = set()
    supported.update(('dotencode', 'fncache', 'generaldelta'))
    supported.update(('revlogv1', 'store'))
    return supported
81
81
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set(('dotencode', 'fncache', 'generaldelta'))
97
97
def preservedrequirements(repo):
    """Obtain requirements unconditionally carried over to the destination.

    Currently empty: no requirement is preserved solely by virtue of being
    present in the source.
    """
    return set()
100
100
# Classification labels for ``improvement`` instances (see the ``type``
# attribute below).  Note the British spelling of the ``optimisation``
# variable versus the American spelling of its string value.
deficiency = 'deficiency'
optimisation = 'optimization'
103
103
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        # Identity is determined solely by ``name``; the other attributes
        # are informational.
        if isinstance(other, improvement):
            return self.name == other.name
        # Defer to the other operand, per the Python data model.
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Consistent with __eq__: hash on the name only.
        return hash(self.name)
145
145
# Registry of every known format-variant class; populated via the
# ``registerformatvariant`` decorator below.
allformatvariant = []

def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
151
151
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    # Format variants are always reported as deficiencies (a repository
    # either has the modern format or it doesn't).
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # Format variants are used as classes, never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
185
185
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # Name of the requirement controlling this format variant; concrete
    # subclasses must override it.
    _requirement = None

    @staticmethod
    def _newreporequirements(repo):
        # Indirection point: what a freshly created repository would require.
        return localrepo.newreporequirements(repo)

    @classmethod
    def fromrepo(cls, repo):
        """current value of the variant in the repository"""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """current value of the variant in the configuration"""
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo)
209
209
@registerformatvariant
class fncache(requirementformatvariant):
    # Tracks presence of the 'fncache' requirement.
    name = 'fncache'

    _requirement = 'fncache'

    # New repositories enable fncache by default.
    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
224
224
@registerformatvariant
class dotencode(requirementformatvariant):
    # Tracks presence of the 'dotencode' requirement.
    name = 'dotencode'

    _requirement = 'dotencode'

    # New repositories enable dotencode by default.
    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
238
238
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Tracks presence of the 'generaldelta' requirement.
    name = 'generaldelta'

    _requirement = 'generaldelta'

    # New repositories enable generaldelta by default.
    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
260
260
@registerformatvariant
class removecldeltachain(formatvariant):
    # Not requirement-backed: detected by inspecting the changelog itself.
    name = 'plain-cl-delta'

    default = True

    description = _('changelog storage is using deltas instead of '
                    'raw entries; changelog reading and any '
                    'operation relying on changelog data are slower '
                    'than they could be')

    upgrademessage = _('changelog storage will be reformated to '
                       'store raw entries; changelog reading will be '
                       'faster; changelog size may be reduced')

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Report
        # False as soon as any revision is found that still has a delta
        # (i.e. whose chain base is not itself).
        cl = repo.changelog
        for rev in cl:
            if cl.chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # New repositories always store plain changelog entries.
        return True
287
287
@registerformatvariant
class compressionengine(formatvariant):
    # Reports which compression engine revlog data uses; 'zlib' is the
    # historical default.
    name = 'compression'
    default = 'zlib'

    # Fixed typos in the user-facing message: "Compresion" -> "Compression",
    # "Some engine are faster than other" -> "Some engines are faster than
    # others".  (Translators will need to update the corresponding entry.)
    description = _('Compression algorithm used to compress data. '
                    'Some engines are faster than others')

    upgrademessage = _('revlog content will be recompressed with the new '
                       'algorithm.')

    @classmethod
    def fromrepo(cls, repo):
        # A non-default engine is recorded as an 'exp-compression-<name>'
        # requirement; absence of such a requirement means zlib.
        for req in repo.requirements:
            if req.startswith('exp-compression-'):
                return req.split('-', 2)[2]
        return 'zlib'

    @classmethod
    def fromconfig(cls, repo):
        return repo.ui.config('experimental', 'format.compression')
309
309
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
323
323
324 def findoptimizations(repo):
324 def findoptimizations(repo):
325 """Determine optimisation that could be used during upgrade"""
325 """Determine optimisation that could be used during upgrade"""
326 # These are unconditionally added. There is logic later that figures out
326 # These are unconditionally added. There is logic later that figures out
327 # which ones to apply.
327 # which ones to apply.
328 optimizations = []
328 optimizations = []
329
329
330 optimizations.append(improvement(
330 optimizations.append(improvement(
331 name='redeltaparent',
331 name='redeltaparent',
332 type=optimisation,
332 type=optimisation,
333 description=_('deltas within internal storage will be recalculated to '
333 description=_('deltas within internal storage will be recalculated to '
334 'choose an optimal base revision where this was not '
334 'choose an optimal base revision where this was not '
335 'already done; the size of the repository may shrink and '
335 'already done; the size of the repository may shrink and '
336 'various operations may become faster; the first time '
336 'various operations may become faster; the first time '
337 'this optimization is performed could slow down upgrade '
337 'this optimization is performed could slow down upgrade '
338 'execution considerably; subsequent invocations should '
338 'execution considerably; subsequent invocations should '
339 'not run noticeably slower'),
339 'not run noticeably slower'),
340 upgrademessage=_('deltas within internal storage will choose a new '
340 upgrademessage=_('deltas within internal storage will choose a new '
341 'base revision if needed')))
341 'base revision if needed')))
342
342
343 optimizations.append(improvement(
343 optimizations.append(improvement(
344 name='redeltamultibase',
344 name='redeltamultibase',
345 type=optimisation,
345 type=optimisation,
346 description=_('deltas within internal storage will be recalculated '
346 description=_('deltas within internal storage will be recalculated '
347 'against multiple base revision and the smallest '
347 'against multiple base revision and the smallest '
348 'difference will be used; the size of the repository may '
348 'difference will be used; the size of the repository may '
349 'shrink significantly when there are many merges; this '
349 'shrink significantly when there are many merges; this '
350 'optimization will slow down execution in proportion to '
350 'optimization will slow down execution in proportion to '
351 'the number of merges in the repository and the amount '
351 'the number of merges in the repository and the amount '
352 'of files in the repository; this slow down should not '
352 'of files in the repository; this slow down should not '
353 'be significant unless there are tens of thousands of '
353 'be significant unless there are tens of thousands of '
354 'files and thousands of merges'),
354 'files and thousands of merges'),
355 upgrademessage=_('deltas within internal storage will choose an '
355 upgrademessage=_('deltas within internal storage will choose an '
356 'optimal delta by computing deltas against multiple '
356 'optimal delta by computing deltas against multiple '
357 'parents; may slow down execution time '
357 'parents; may slow down execution time '
358 'significantly')))
358 'significantly')))
359
359
360 optimizations.append(improvement(
360 optimizations.append(improvement(
361 name='redeltaall',
361 name='redeltaall',
362 type=optimisation,
362 type=optimisation,
363 description=_('deltas within internal storage will always be '
363 description=_('deltas within internal storage will always be '
364 'recalculated without reusing prior deltas; this will '
364 'recalculated without reusing prior deltas; this will '
365 'likely make execution run several times slower; this '
365 'likely make execution run several times slower; this '
366 'optimization is typically not needed'),
366 'optimization is typically not needed'),
367 upgrademessage=_('deltas within internal storage will be fully '
367 upgrademessage=_('deltas within internal storage will be fully '
368 'recomputed; this will likely drastically slow down '
368 'recomputed; this will likely drastically slow down '
369 'execution time')))
369 'execution time')))
370
370
371 optimizations.append(improvement(
371 optimizations.append(improvement(
372 name='redeltafulladd',
372 name='redeltafulladd',
373 type=optimisation,
373 type=optimisation,
374 description=_('every revision will be re-added as if it was new '
374 description=_('every revision will be re-added as if it was new '
375 'content. It will go through the full storage '
375 'content. It will go through the full storage '
376 'mechanism giving extensions a chance to process it '
376 'mechanism giving extensions a chance to process it '
377 '(eg. lfs). This is similar to "redeltaall" but even '
377 '(eg. lfs). This is similar to "redeltaall" but even '
378 'slower since more logic is involved.'),
378 'slower since more logic is involved.'),
379 upgrademessage=_('each revision will be added as new content to the '
379 upgrademessage=_('each revision will be added as new content to the '
380 'internal storage; this will likely drastically slow '
380 'internal storage; this will likely drastically slow '
381 'down execution time, but some extensions might need '
381 'down execution time, but some extensions might need '
382 'it')))
382 'it')))
383
383
384 return optimizations
384 return optimizations
385
385
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    # Keep a deficiency unless it is a requirement-backed action whose
    # requirement does not appear in the destination (i.e. prune actions
    # for requirements the new repo won't have).
    newactions = [d for d in deficiencies
                  if d.name not in knownreqs or d.name in destreqs]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
417
417
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    # Recover the tracked file name by stripping the leading "data/" and
    # trailing ".i" (reverse of "/".join(("data", path + ".i"))).
    return filelog.filelog(repo.svfs, path[5:-2])
431
431
432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
433 """Copy revlogs between 2 repos."""
433 """Copy revlogs between 2 repos."""
434 revcount = 0
434 revcount = 0
435 srcsize = 0
435 srcsize = 0
436 srcrawsize = 0
436 srcrawsize = 0
437 dstsize = 0
437 dstsize = 0
438 fcount = 0
438 fcount = 0
439 frevcount = 0
439 frevcount = 0
440 fsrcsize = 0
440 fsrcsize = 0
441 frawsize = 0
441 frawsize = 0
442 fdstsize = 0
442 fdstsize = 0
443 mcount = 0
443 mcount = 0
444 mrevcount = 0
444 mrevcount = 0
445 msrcsize = 0
445 msrcsize = 0
446 mrawsize = 0
446 mrawsize = 0
447 mdstsize = 0
447 mdstsize = 0
448 crevcount = 0
448 crevcount = 0
449 csrcsize = 0
449 csrcsize = 0
450 crawsize = 0
450 crawsize = 0
451 cdstsize = 0
451 cdstsize = 0
452
452
453 # Perform a pass to collect metadata. This validates we can open all
453 # Perform a pass to collect metadata. This validates we can open all
454 # source files and allows a unified progress bar to be displayed.
454 # source files and allows a unified progress bar to be displayed.
455 for unencoded, encoded, size in srcrepo.store.walk():
455 for unencoded, encoded, size in srcrepo.store.walk():
456 if unencoded.endswith('.d'):
456 if unencoded.endswith('.d'):
457 continue
457 continue
458
458
459 rl = _revlogfrompath(srcrepo, unencoded)
459 rl = _revlogfrompath(srcrepo, unencoded)
460 revcount += len(rl)
460 revcount += len(rl)
461
461
462 datasize = 0
462 datasize = 0
463 rawsize = 0
463 rawsize = 0
464 idx = rl.index
464 idx = rl.index
465 for rev in rl:
465 for rev in rl:
466 e = idx[rev]
466 e = idx[rev]
467 datasize += e[1]
467 datasize += e[1]
468 rawsize += e[2]
468 rawsize += e[2]
469
469
470 srcsize += datasize
470 srcsize += datasize
471 srcrawsize += rawsize
471 srcrawsize += rawsize
472
472
473 # This is for the separate progress bars.
473 # This is for the separate progress bars.
474 if isinstance(rl, changelog.changelog):
474 if isinstance(rl, changelog.changelog):
475 crevcount += len(rl)
475 crevcount += len(rl)
476 csrcsize += datasize
476 csrcsize += datasize
477 crawsize += rawsize
477 crawsize += rawsize
478 elif isinstance(rl, manifest.manifestrevlog):
478 elif isinstance(rl, manifest.manifestrevlog):
479 mcount += 1
479 mcount += 1
480 mrevcount += len(rl)
480 mrevcount += len(rl)
481 msrcsize += datasize
481 msrcsize += datasize
482 mrawsize += rawsize
482 mrawsize += rawsize
483 elif isinstance(rl, filelog.filelog):
483 elif isinstance(rl, filelog.filelog):
484 fcount += 1
484 fcount += 1
485 frevcount += len(rl)
485 frevcount += len(rl)
486 fsrcsize += datasize
486 fsrcsize += datasize
487 frawsize += rawsize
487 frawsize += rawsize
488 else:
488 else:
489 error.ProgrammingError('unknown revlog type')
489 error.ProgrammingError('unknown revlog type')
490
490
491 if not revcount:
491 if not revcount:
492 return
492 return
493
493
494 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
494 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
495 '%d in changelog)\n') %
495 '%d in changelog)\n') %
496 (revcount, frevcount, mrevcount, crevcount))
496 (revcount, frevcount, mrevcount, crevcount))
497 ui.write(_('migrating %s in store; %s tracked data\n') % (
497 ui.write(_('migrating %s in store; %s tracked data\n') % (
498 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
498 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
499
499
500 # Used to keep track of progress.
500 # Used to keep track of progress.
501 progress = []
501 progress = []
502 def oncopiedrevision(rl, rev, node):
502 def oncopiedrevision(rl, rev, node):
503 progress[1] += 1
503 progress[1] += 1
504 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
504 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
505
505
506 # Do the actual copying.
506 # Do the actual copying.
507 # FUTURE this operation can be farmed off to worker processes.
507 # FUTURE this operation can be farmed off to worker processes.
508 seen = set()
508 seen = set()
509 for unencoded, encoded, size in srcrepo.store.walk():
509 for unencoded, encoded, size in srcrepo.store.walk():
510 if unencoded.endswith('.d'):
510 if unencoded.endswith('.d'):
511 continue
511 continue
512
512
513 oldrl = _revlogfrompath(srcrepo, unencoded)
513 oldrl = _revlogfrompath(srcrepo, unencoded)
514 newrl = _revlogfrompath(dstrepo, unencoded)
514 newrl = _revlogfrompath(dstrepo, unencoded)
515
515
516 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
516 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
517 ui.write(_('finished migrating %d manifest revisions across %d '
517 ui.write(_('finished migrating %d manifest revisions across %d '
518 'manifests; change in size: %s\n') %
518 'manifests; change in size: %s\n') %
519 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
519 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
520
520
521 ui.write(_('migrating changelog containing %d revisions '
521 ui.write(_('migrating changelog containing %d revisions '
522 '(%s in store; %s tracked data)\n') %
522 '(%s in store; %s tracked data)\n') %
523 (crevcount, util.bytecount(csrcsize),
523 (crevcount, util.bytecount(csrcsize),
524 util.bytecount(crawsize)))
524 util.bytecount(crawsize)))
525 seen.add('c')
525 seen.add('c')
526 progress[:] = [_('changelog revisions'), 0, crevcount]
526 progress[:] = [_('changelog revisions'), 0, crevcount]
527 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
527 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
528 ui.write(_('finished migrating %d filelog revisions across %d '
528 ui.write(_('finished migrating %d filelog revisions across %d '
529 'filelogs; change in size: %s\n') %
529 'filelogs; change in size: %s\n') %
530 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
530 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
531
531
532 ui.write(_('migrating %d manifests containing %d revisions '
532 ui.write(_('migrating %d manifests containing %d revisions '
533 '(%s in store; %s tracked data)\n') %
533 '(%s in store; %s tracked data)\n') %
534 (mcount, mrevcount, util.bytecount(msrcsize),
534 (mcount, mrevcount, util.bytecount(msrcsize),
535 util.bytecount(mrawsize)))
535 util.bytecount(mrawsize)))
536 seen.add('m')
536 seen.add('m')
537 if progress:
538 ui.progress(progress[0], None)
537 progress[:] = [_('manifest revisions'), 0, mrevcount]
539 progress[:] = [_('manifest revisions'), 0, mrevcount]
538 elif 'f' not in seen:
540 elif 'f' not in seen:
539 ui.write(_('migrating %d filelogs containing %d revisions '
541 ui.write(_('migrating %d filelogs containing %d revisions '
540 '(%s in store; %s tracked data)\n') %
542 '(%s in store; %s tracked data)\n') %
541 (fcount, frevcount, util.bytecount(fsrcsize),
543 (fcount, frevcount, util.bytecount(fsrcsize),
542 util.bytecount(frawsize)))
544 util.bytecount(frawsize)))
543 seen.add('f')
545 seen.add('f')
546 if progress:
547 ui.progress(progress[0], None)
544 progress[:] = [_('file revisions'), 0, frevcount]
548 progress[:] = [_('file revisions'), 0, frevcount]
545
549
546 ui.progress(progress[0], progress[1], total=progress[2])
550 ui.progress(progress[0], progress[1], total=progress[2])
547
551
548 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
552 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
549 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
553 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
550 deltareuse=deltareuse,
554 deltareuse=deltareuse,
551 aggressivemergedeltas=aggressivemergedeltas)
555 aggressivemergedeltas=aggressivemergedeltas)
552
556
553 datasize = 0
557 datasize = 0
554 idx = newrl.index
558 idx = newrl.index
555 for rev in newrl:
559 for rev in newrl:
556 datasize += idx[rev][1]
560 datasize += idx[rev][1]
557
561
558 dstsize += datasize
562 dstsize += datasize
559
563
560 if isinstance(newrl, changelog.changelog):
564 if isinstance(newrl, changelog.changelog):
561 cdstsize += datasize
565 cdstsize += datasize
562 elif isinstance(newrl, manifest.manifestrevlog):
566 elif isinstance(newrl, manifest.manifestrevlog):
563 mdstsize += datasize
567 mdstsize += datasize
564 else:
568 else:
565 fdstsize += datasize
569 fdstsize += datasize
566
570
567 ui.progress(progress[0], None)
571 ui.progress(progress[0], None)
568
572
569 ui.write(_('finished migrating %d changelog revisions; change in size: '
573 ui.write(_('finished migrating %d changelog revisions; change in size: '
570 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
574 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
571
575
572 ui.write(_('finished migrating %d total revisions; total change in store '
576 ui.write(_('finished migrating %d total revisions; total change in store '
573 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
577 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
574
578
575 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
579 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
576 """Determine whether to copy a store file during upgrade.
580 """Determine whether to copy a store file during upgrade.
577
581
578 This function is called when migrating store files from ``srcrepo`` to
582 This function is called when migrating store files from ``srcrepo`` to
579 ``dstrepo`` as part of upgrading a repository.
583 ``dstrepo`` as part of upgrading a repository.
580
584
581 Args:
585 Args:
582 srcrepo: repo we are copying from
586 srcrepo: repo we are copying from
583 dstrepo: repo we are copying to
587 dstrepo: repo we are copying to
584 requirements: set of requirements for ``dstrepo``
588 requirements: set of requirements for ``dstrepo``
585 path: store file being examined
589 path: store file being examined
586 mode: the ``ST_MODE`` file type of ``path``
590 mode: the ``ST_MODE`` file type of ``path``
587 st: ``stat`` data structure for ``path``
591 st: ``stat`` data structure for ``path``
588
592
589 Function should return ``True`` if the file is to be copied.
593 Function should return ``True`` if the file is to be copied.
590 """
594 """
591 # Skip revlogs.
595 # Skip revlogs.
592 if path.endswith(('.i', '.d')):
596 if path.endswith(('.i', '.d')):
593 return False
597 return False
594 # Skip transaction related files.
598 # Skip transaction related files.
595 if path.startswith('undo'):
599 if path.startswith('undo'):
596 return False
600 return False
597 # Only copy regular files.
601 # Only copy regular files.
598 if mode != stat.S_IFREG:
602 if mode != stat.S_IFREG:
599 return False
603 return False
600 # Skip other skipped files.
604 # Skip other skipped files.
601 if path in ('lock', 'fncache'):
605 if path in ('lock', 'fncache'):
602 return False
606 return False
603
607
604 return True
608 return True
605
609
606 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
610 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
607 """Hook point for extensions to perform additional actions during upgrade.
611 """Hook point for extensions to perform additional actions during upgrade.
608
612
609 This function is called after revlogs and store files have been copied but
613 This function is called after revlogs and store files have been copied but
610 before the new store is swapped into the original location.
614 before the new store is swapped into the original location.
611 """
615 """
612
616
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory that receives the old store
    and a copy of the old ``requires`` file.
    """
    # Callers must already hold the wlock on both repositories.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Translate the requested "redelta" action (if any) into the revlog
    # clone delta-reuse policy. NOTE(review): 'redeltamultibase' maps to
    # DELTAREUSESAMEREVS here; the multi-base behavior itself is carried by
    # the extra boolean passed to _copyrevlogs() below.
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltafulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        # Default: reuse existing deltas wholesale (fastest).
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    # All writes to the destination happen inside a single transaction.
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

        ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | {'upgradeinprogress'})

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
703
707
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    With ``run=False`` (the default), only report what an upgrade would
    change; with ``run=True``, actually perform the upgrade. ``optimize``
    is an optional collection of extra optimization action names; unknown
    names raise ``error.Abort``.
    """
    optimize = set(optimize or [])
    # Operate on the unfiltered view of the repository.
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)
    newreqs.update(preservedrequirements(repo))

    # Requirements that would disappear must be explicitly supported for
    # removal; otherwise refuse to proceed.
    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # Likewise, requirements that would be added must be on the allow list.
    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        # Summarize which requirements are preserved, removed and added.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # One entry per planned action with its user-facing message.
        for a in actions:
            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies and what --run would do,
        # then return without touching the repository.
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions)

        finally:
            # Always remove the staging repository, whether the upgrade
            # succeeded or aborted; on success its store has already been
            # renamed into place by _upgraderepo().
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now