upgrade: sniff for filelog type...
Gregory Szorc
r37462:c8666a9e default
@@ -1,866 +1,868
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import stat
11 import tempfile
12
13 from .i18n import _
14 from . import (
15 changelog,
16 error,
17 filelog,
18 hg,
19 localrepo,
20 manifest,
21 revlog,
22 scmutil,
23 util,
24 vfs as vfsmod,
25 )
26
27 def requiredsourcerequirements(repo):
28 """Obtain requirements required to be present to upgrade a repo.
29
30 An upgrade will not be allowed if the repository doesn't have the
31 requirements returned by this function.
32 """
33 return {
34 # Introduced in Mercurial 0.9.2.
35 'revlogv1',
36 # Introduced in Mercurial 0.9.2.
37 'store',
38 }
39
40 def blocksourcerequirements(repo):
41 """Obtain requirements that will prevent an upgrade from occurring.
42
43 An upgrade cannot be performed if the source repository contains a
44 requirement in the returned set.
45 """
45 """
46 return {
46 return {
47 # The upgrade code does not yet support these experimental features.
47 # The upgrade code does not yet support these experimental features.
48 # This is an artificial limitation.
48 # This is an artificial limitation.
49 'treemanifest',
49 'treemanifest',
50 # This was a precursor to generaldelta and was never enabled by default.
50 # This was a precursor to generaldelta and was never enabled by default.
51 # It should (hopefully) not exist in the wild.
51 # It should (hopefully) not exist in the wild.
52 'parentdelta',
52 'parentdelta',
53 # Upgrade should operate on the actual store, not the shared link.
53 # Upgrade should operate on the actual store, not the shared link.
54 'shared',
54 'shared',
55 }
55 }
56
56
57 def supportremovedrequirements(repo):
57 def supportremovedrequirements(repo):
58 """Obtain requirements that can be removed during an upgrade.
58 """Obtain requirements that can be removed during an upgrade.
59
59
60 If an upgrade were to create a repository that dropped a requirement,
60 If an upgrade were to create a repository that dropped a requirement,
61 the dropped requirement must appear in the returned set for the upgrade
61 the dropped requirement must appear in the returned set for the upgrade
62 to be allowed.
62 to be allowed.
63 """
63 """
64 return set()
64 return set()
65
65
66 def supporteddestrequirements(repo):
66 def supporteddestrequirements(repo):
67 """Obtain requirements that upgrade supports in the destination.
67 """Obtain requirements that upgrade supports in the destination.
68
68
69 If the result of the upgrade would create requirements not in this set,
69 If the result of the upgrade would create requirements not in this set,
70 the upgrade is disallowed.
70 the upgrade is disallowed.
71
71
72 Extensions should monkeypatch this to add their custom requirements.
72 Extensions should monkeypatch this to add their custom requirements.
73 """
73 """
74 return {
74 return {
75 'dotencode',
75 'dotencode',
76 'fncache',
76 'fncache',
77 'generaldelta',
77 'generaldelta',
78 'revlogv1',
78 'revlogv1',
79 'store',
79 'store',
80 }
80 }
81
81
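The docstring above says extensions should monkeypatch this function; a minimal sketch of what that can look like, using Mercurial's extensions.wrapfunction() helper (the extension module and the 'exp-myfeature' requirement name are hypothetical):

# in a hypothetical extension module
from mercurial import extensions, upgrade

def _supporteddest(orig, repo):
    # start from the stock set and advertise the extension's own requirement
    reqs = orig(repo)
    reqs.add('exp-myfeature')
    return reqs

def extsetup(ui):
    extensions.wrapfunction(upgrade, 'supporteddestrequirements', _supporteddest)
    # allowednewrequirements() below would usually be wrapped the same way if
    # the requirement may be newly added during an upgrade
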
82 def allowednewrequirements(repo):
83 """Obtain requirements that can be added to a repository during upgrade.
84
85 This is used to disallow proposed requirements from being added when
86 they weren't present before.
87
88 We use a list of allowed requirement additions instead of a list of known
89 bad additions because the whitelist approach is safer and will prevent
90 future, unknown requirements from accidentally being added.
91 """
92 return {
93 'dotencode',
94 'fncache',
95 'generaldelta',
96 }
97
98 def preservedrequirements(repo):
99 return set()
100
101 deficiency = 'deficiency'
102 optimisation = 'optimization'
103
104 class improvement(object):
105 """Represents an improvement that can be made as part of an upgrade.
106
107 The following attributes are defined on each instance:
108
109 name
110 Machine-readable string uniquely identifying this improvement. It
111 will be mapped to an action later in the upgrade process.
112
113 type
114 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
115 problem. An optimization is an action (sometimes optional) that
116 can be taken to further improve the state of the repository.
117
118 description
119 Message intended for humans explaining the improvement in more detail,
120 including the implications of it. For ``deficiency`` types, should be
121 worded in the present tense. For ``optimisation`` types, should be
122 worded in the future tense.
123
124 upgrademessage
125 Message intended for humans explaining what an upgrade addressing this
126 issue will do. Should be worded in the future tense.
127 """
128 def __init__(self, name, type, description, upgrademessage):
129 self.name = name
130 self.type = type
131 self.description = description
132 self.upgrademessage = upgrademessage
133
134 def __eq__(self, other):
135 if not isinstance(other, improvement):
136 # This is what Python tells us to do
137 return NotImplemented
138 return self.name == other.name
139
140 def __ne__(self, other):
141 return not self == other
142
143 def __hash__(self):
144 return hash(self.name)
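# (equality and hashing go by name only, so improvements can be deduplicated with set membership and the "o not in actions" checks used later in this module)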
145
146 allformatvariant = []
147
148 def registerformatvariant(cls):
149 allformatvariant.append(cls)
150 return cls
151
152 class formatvariant(improvement):
153 """an improvement subclass dedicated to repository format"""
154 type = deficiency
155 ### The following attributes should be defined for each class:
156
157 # machine-readable string uniquely identifying this improvement. it will be
158 # mapped to an action later in the upgrade process.
159 name = None
160
161 # message intended for humans explaining the improvement in more detail,
162 # including the implications of it. For ``deficiency`` types, should be worded
163 # in the present tense.
164 description = None
165
166 # message intended for humans explaining what an upgrade addressing this
167 # issue will do. should be worded in the future tense.
168 upgrademessage = None
169
170 # value of current Mercurial default for new repository
171 default = None
172
173 def __init__(self):
174 raise NotImplementedError()
175
176 @staticmethod
177 def fromrepo(repo):
178 """current value of the variant in the repository"""
179 raise NotImplementedError()
180
181 @staticmethod
182 def fromconfig(repo):
183 """current value of the variant in the configuration"""
184 raise NotImplementedError()
185
186 class requirementformatvariant(formatvariant):
187 """formatvariant based on a 'requirement' name.
188
189 Many format variants are controlled by a 'requirement'. We define a small
190 subclass to factor the code.
191 """
192
193 # the requirement that controls this format variant
194 _requirement = None
195
196 @staticmethod
197 def _newreporequirements(repo):
198 return localrepo.newreporequirements(repo)
199
200 @classmethod
201 def fromrepo(cls, repo):
202 assert cls._requirement is not None
203 return cls._requirement in repo.requirements
204
205 @classmethod
206 def fromconfig(cls, repo):
207 assert cls._requirement is not None
208 return cls._requirement in cls._newreporequirements(repo)
209
210 @registerformatvariant
211 class fncache(requirementformatvariant):
212 name = 'fncache'
213
214 _requirement = 'fncache'
215
216 default = True
217
218 description = _('long and reserved filenames may not work correctly; '
219 'repository performance is sub-optimal')
220
221 upgrademessage = _('repository will be more resilient to storing '
222 'certain paths and performance of certain '
223 'operations should be improved')
224
225 @registerformatvariant
226 class dotencode(requirementformatvariant):
227 name = 'dotencode'
228
229 _requirement = 'dotencode'
230
231 default = True
232
233 description = _('storage of filenames beginning with a period or '
234 'space may not work correctly')
235
236 upgrademessage = _('repository will be better able to store files '
237 'beginning with a space or period')
238
239 @registerformatvariant
240 class generaldelta(requirementformatvariant):
241 name = 'generaldelta'
242
243 _requirement = 'generaldelta'
244
245 default = True
246
247 description = _('deltas within internal storage are unable to '
248 'choose optimal revisions; repository is larger and '
249 'slower than it could be; interaction with other '
250 'repositories may require extra network and CPU '
251 'resources, making "hg push" and "hg pull" slower')
252
253 upgrademessage = _('repository storage will be able to create '
254 'optimal deltas; new repository data will be '
255 'smaller and read times should decrease; '
256 'interacting with other repositories using this '
257 'storage model should require less network and '
258 'CPU resources, making "hg push" and "hg pull" '
259 'faster')
260
261 @registerformatvariant
262 class removecldeltachain(formatvariant):
263 name = 'plain-cl-delta'
264
265 default = True
266
267 description = _('changelog storage is using deltas instead of '
268 'raw entries; changelog reading and any '
269 'operation relying on changelog data are slower '
270 'than they could be')
271
272 upgrademessage = _('changelog storage will be reformatted to '
273 'store raw entries; changelog reading will be '
274 'faster; changelog size may be reduced')
275
276 @staticmethod
277 def fromrepo(repo):
278 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
279 # changelogs with deltas.
280 cl = repo.changelog
281 chainbase = cl.chainbase
282 return all(rev == chainbase(rev) for rev in cl)
283
284 @staticmethod
285 def fromconfig(repo):
286 return True
287
288 @registerformatvariant
289 class compressionengine(formatvariant):
290 name = 'compression'
291 default = 'zlib'
292
293 description = _('Compression algorithm used to compress data. '
294 'Some engines are faster than others')
295
296 upgrademessage = _('revlog content will be recompressed with the new '
297 'algorithm.')
298
299 @classmethod
300 def fromrepo(cls, repo):
301 for req in repo.requirements:
302 if req.startswith('exp-compression-'):
303 return req.split('-', 2)[2]
304 return 'zlib'
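# (a requirement such as 'exp-compression-zstd' therefore yields 'zstd' here)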
305
306 @classmethod
307 def fromconfig(cls, repo):
308 return repo.ui.config('experimental', 'format.compression')
309
310 def finddeficiencies(repo):
311 """returns a list of deficiencies that the repo suffer from"""
311 """returns a list of deficiencies that the repo suffer from"""
312 deficiencies = []
313
314 # We could detect lack of revlogv1 and store here, but they were added
315 # in 0.9.2 and we don't support upgrading repos without these
316 # requirements, so let's not bother.
317
318 for fv in allformatvariant:
319 if not fv.fromrepo(repo):
320 deficiencies.append(fv)
321
322 return deficiencies
323
324 def findoptimizations(repo):
325 """Determine optimisations that could be used during upgrade"""
326 # These are unconditionally added. There is logic later that figures out
327 # which ones to apply.
328 optimizations = []
329
330 optimizations.append(improvement(
331 name='redeltaparent',
332 type=optimisation,
333 description=_('deltas within internal storage will be recalculated to '
334 'choose an optimal base revision where this was not '
335 'already done; the size of the repository may shrink and '
336 'various operations may become faster; the first time '
337 'this optimization is performed could slow down upgrade '
338 'execution considerably; subsequent invocations should '
339 'not run noticeably slower'),
340 upgrademessage=_('deltas within internal storage will choose a new '
341 'base revision if needed')))
342
343 optimizations.append(improvement(
344 name='redeltamultibase',
345 type=optimisation,
346 description=_('deltas within internal storage will be recalculated '
347 'against multiple base revisions and the smallest '
348 'difference will be used; the size of the repository may '
349 'shrink significantly when there are many merges; this '
350 'optimization will slow down execution in proportion to '
351 'the number of merges in the repository and the number '
352 'of files in the repository; this slowdown should not '
353 'be significant unless there are tens of thousands of '
354 'files and thousands of merges'),
355 upgrademessage=_('deltas within internal storage will choose an '
356 'optimal delta by computing deltas against multiple '
357 'parents; may slow down execution time '
358 'significantly')))
359
360 optimizations.append(improvement(
361 name='redeltaall',
362 type=optimisation,
363 description=_('deltas within internal storage will always be '
364 'recalculated without reusing prior deltas; this will '
365 'likely make execution run several times slower; this '
366 'optimization is typically not needed'),
367 upgrademessage=_('deltas within internal storage will be fully '
368 'recomputed; this will likely drastically slow down '
369 'execution time')))
370
371 optimizations.append(improvement(
372 name='redeltafulladd',
373 type=optimisation,
374 description=_('every revision will be re-added as if it was new '
375 'content. It will go through the full storage '
376 'mechanism giving extensions a chance to process it '
377 '(e.g. lfs). This is similar to "redeltaall" but even '
378 'slower since more logic is involved.'),
379 upgrademessage=_('each revision will be added as new content to the '
380 'internal storage; this will likely drastically slow '
381 'down execution time, but some extensions might need '
382 'it')))
383
384 return optimizations
385
386 def determineactions(repo, deficiencies, sourcereqs, destreqs):
387 """Determine upgrade actions that will be performed.
388
389 Given a list of improvements as returned by ``finddeficiencies`` and
390 ``findoptimizations``, determine the list of upgrade actions that
391 will be performed.
392
393 The role of this function is to filter improvements if needed, apply
394 recommended optimizations from the improvements list that make sense,
395 etc.
396
397 Returns a list of action names.
398 """
399 newactions = []
400
401 knownreqs = supporteddestrequirements(repo)
402
403 for d in deficiencies:
404 name = d.name
405
406 # If the action is a requirement that doesn't show up in the
407 # destination requirements, prune the action.
408 if name in knownreqs and name not in destreqs:
409 continue
410
411 newactions.append(d)
412
413 # FUTURE consider adding some optimizations here for certain transitions.
414 # e.g. adding generaldelta could schedule parent redeltas.
415
416 return newactions
417
418 def _revlogfrompath(repo, path):
419 """Obtain a revlog from a repo path.
420
421 An instance of the appropriate class is returned.
422 """
423 if path == '00changelog.i':
424 return changelog.changelog(repo.svfs)
425 elif path.endswith('00manifest.i'):
426 mandir = path[:-len('00manifest.i')]
427 return manifest.manifestrevlog(repo.svfs, dir=mandir)
428 else:
429 #reverse of "/".join(("data", path + ".i"))
430 return filelog.filelog(repo.svfs, path[5:-2])
431
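A few illustrative calls for the mapping above (the store paths are hypothetical examples):

#   _revlogfrompath(repo, '00changelog.i')   -> changelog.changelog
#   _revlogfrompath(repo, '00manifest.i')    -> manifest.manifestrevlog (root manifest)
#   _revlogfrompath(repo, 'data/foo.py.i')   -> filelog.filelog for 'foo.py'
#                                                (path[5:-2] drops the 'data/' prefix and the '.i' suffix)
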
432 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
433 """Copy revlogs between 2 repos."""
434 revcount = 0
435 srcsize = 0
436 srcrawsize = 0
437 dstsize = 0
438 fcount = 0
439 frevcount = 0
440 fsrcsize = 0
441 frawsize = 0
442 fdstsize = 0
443 mcount = 0
444 mrevcount = 0
445 msrcsize = 0
446 mrawsize = 0
447 mdstsize = 0
448 crevcount = 0
449 csrcsize = 0
450 crawsize = 0
451 cdstsize = 0
452
453 # Perform a pass to collect metadata. This validates we can open all
454 # source files and allows a unified progress bar to be displayed.
455 for unencoded, encoded, size in srcrepo.store.walk():
456 if unencoded.endswith('.d'):
457 continue
458
459 rl = _revlogfrompath(srcrepo, unencoded)
460 revcount += len(rl)
461
462 datasize = 0
463 rawsize = 0
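# (revlog index entries carry per-revision sizes: e[1] is the size of the revision as stored in the revlog, e[2] is the uncompressed fulltext length, i.e. the "tracked data" figure reported below)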
464 idx = rl.index
465 for rev in rl:
466 e = idx[rev]
467 datasize += e[1]
468 rawsize += e[2]
469
470 srcsize += datasize
471 srcrawsize += rawsize
472
473 # This is for the separate progress bars.
474 if isinstance(rl, changelog.changelog):
475 crevcount += len(rl)
476 csrcsize += datasize
477 crawsize += rawsize
478 elif isinstance(rl, manifest.manifestrevlog):
479 mcount += 1
480 mrevcount += len(rl)
481 msrcsize += datasize
482 mrawsize += rawsize
- 483 elif isinstance(rl, revlog.revlog):
+ 483 elif isinstance(rl, filelog.filelog):
484 fcount += 1
485 frevcount += len(rl)
486 fsrcsize += datasize
487 frawsize += rawsize
+ 488 else:
+ 489 raise error.ProgrammingError('unknown revlog type')
490
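# (this filelog/else split is the point of the changeset: filelogs are now detected explicitly, and any other revlog type is reported as a programming error instead of being silently counted as a filelog)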
491 if not revcount:
492 return
493
494 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
495 '%d in changelog)\n') %
496 (revcount, frevcount, mrevcount, crevcount))
497 ui.write(_('migrating %s in store; %s tracked data\n') % (
498 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
499
500 # Used to keep track of progress.
501 progress = []
502 def oncopiedrevision(rl, rev, node):
503 progress[1] += 1
504 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
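# ('progress' is shared with the callback above and re-seeded in place via progress[:] = [...] each time the copy loop below moves on to a new revlog class)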
505
506 # Do the actual copying.
507 # FUTURE this operation can be farmed off to worker processes.
508 seen = set()
509 for unencoded, encoded, size in srcrepo.store.walk():
510 if unencoded.endswith('.d'):
511 continue
512
513 oldrl = _revlogfrompath(srcrepo, unencoded)
514 newrl = _revlogfrompath(dstrepo, unencoded)
515
516 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
517 ui.write(_('finished migrating %d manifest revisions across %d '
518 'manifests; change in size: %s\n') %
519 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
520
521 ui.write(_('migrating changelog containing %d revisions '
522 '(%s in store; %s tracked data)\n') %
523 (crevcount, util.bytecount(csrcsize),
524 util.bytecount(crawsize)))
525 seen.add('c')
526 progress[:] = [_('changelog revisions'), 0, crevcount]
527 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
528 ui.write(_('finished migrating %d filelog revisions across %d '
529 'filelogs; change in size: %s\n') %
530 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
531
532 ui.write(_('migrating %d manifests containing %d revisions '
533 '(%s in store; %s tracked data)\n') %
534 (mcount, mrevcount, util.bytecount(msrcsize),
535 util.bytecount(mrawsize)))
536 seen.add('m')
537 progress[:] = [_('manifest revisions'), 0, mrevcount]
538 elif 'f' not in seen:
539 ui.write(_('migrating %d filelogs containing %d revisions '
540 '(%s in store; %s tracked data)\n') %
541 (fcount, frevcount, util.bytecount(fsrcsize),
542 util.bytecount(frawsize)))
543 seen.add('f')
544 progress[:] = [_('file revisions'), 0, frevcount]
545
546 ui.progress(progress[0], progress[1], total=progress[2])
547
548 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
549 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
550 deltareuse=deltareuse,
551 aggressivemergedeltas=aggressivemergedeltas)
552
553 datasize = 0
554 idx = newrl.index
555 for rev in newrl:
556 datasize += idx[rev][1]
557
558 dstsize += datasize
559
560 if isinstance(newrl, changelog.changelog):
561 cdstsize += datasize
562 elif isinstance(newrl, manifest.manifestrevlog):
563 mdstsize += datasize
564 else:
565 fdstsize += datasize
566
567 ui.progress(progress[0], None)
568
569 ui.write(_('finished migrating %d changelog revisions; change in size: '
570 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
571
572 ui.write(_('finished migrating %d total revisions; total change in store '
573 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
574
575 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
576 """Determine whether to copy a store file during upgrade.
577
578 This function is called when migrating store files from ``srcrepo`` to
579 ``dstrepo`` as part of upgrading a repository.
580
581 Args:
582 srcrepo: repo we are copying from
583 dstrepo: repo we are copying to
584 requirements: set of requirements for ``dstrepo``
585 path: store file being examined
586 mode: the ``ST_MODE`` file type of ``path``
587 st: ``stat`` data structure for ``path``
588
589 Function should return ``True`` if the file is to be copied.
590 """
591 # Skip revlogs.
592 if path.endswith(('.i', '.d')):
593 return False
594 # Skip transaction related files.
595 if path.startswith('undo'):
596 return False
597 # Only copy regular files.
598 if mode != stat.S_IFREG:
599 return False
600 # Skip other skipped files.
601 if path in ('lock', 'fncache'):
602 return False
603
604 return True
605
606 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
607 """Hook point for extensions to perform additional actions during upgrade.
608
609 This function is called after revlogs and store files have been copied but
610 before the new store is swapped into the original location.
611 """
612
613 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
614 """Do the low-level work of upgrading a repository.
615
616 The upgrade is effectively performed as a copy between a source
617 repository and a temporary destination repository.
618
619 The source repository is unmodified for as long as possible so the
620 upgrade can abort at any time without causing loss of service for
621 readers and without corrupting the source repository.
622 """
623 assert srcrepo.currentwlock()
624 assert dstrepo.currentwlock()
625
626 ui.write(_('(it is safe to interrupt this process any time before '
627 'data migration completes)\n'))
628
629 if 'redeltaall' in actions:
630 deltareuse = revlog.revlog.DELTAREUSENEVER
631 elif 'redeltaparent' in actions:
632 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
633 elif 'redeltamultibase' in actions:
634 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
635 elif 'redeltafulladd' in actions:
636 deltareuse = revlog.revlog.DELTAREUSEFULLADD
637 else:
638 deltareuse = revlog.revlog.DELTAREUSEALWAYS
639
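# (note that 'redeltaparent' and 'redeltamultibase' map to the same DELTAREUSESAMEREVS policy; the multi-base behaviour comes from the aggressivemergedeltas flag passed to _copyrevlogs() below)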
640 with dstrepo.transaction('upgrade') as tr:
641 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
642 'redeltamultibase' in actions)
643
644 # Now copy other files in the store directory.
645 # The sorted() makes execution deterministic.
646 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
647 if not _filterstorefile(srcrepo, dstrepo, requirements,
648 p, kind, st):
649 continue
650
651 srcrepo.ui.write(_('copying %s\n') % p)
652 src = srcrepo.store.rawvfs.join(p)
653 dst = dstrepo.store.rawvfs.join(p)
654 util.copyfile(src, dst, copystat=True)
655
656 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
657
658 ui.write(_('data fully migrated to temporary repository\n'))
659
660 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
661 backupvfs = vfsmod.vfs(backuppath)
662
663 # Make a backup of requires file first, as it is the first to be modified.
664 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
665
666 # We install an arbitrary requirement that clients must not support
667 # as a mechanism to lock out new clients during the data swap. This is
668 # better than allowing a client to continue while the repository is in
669 # an inconsistent state.
670 ui.write(_('marking source repository as being upgraded; clients will be '
671 'unable to read from repository\n'))
672 scmutil.writerequires(srcrepo.vfs,
673 srcrepo.requirements | {'upgradeinprogress'})
674
675 ui.write(_('starting in-place swap of repository data\n'))
676 ui.write(_('replaced files will be backed up at %s\n') %
677 backuppath)
678
679 # Now swap in the new store directory. Doing it as a rename should make
680 # the operation nearly instantaneous and atomic (at least in well-behaved
681 # environments).
682 ui.write(_('replacing store...\n'))
683 tstart = util.timer()
684 util.rename(srcrepo.spath, backupvfs.join('store'))
685 util.rename(dstrepo.spath, srcrepo.spath)
686 elapsed = util.timer() - tstart
687 ui.write(_('store replacement complete; repository was inconsistent for '
688 '%0.1fs\n') % elapsed)
689
690 # We first write the requirements file. Any new requirements will lock
691 # out legacy clients.
692 ui.write(_('finalizing requirements file and making repository readable '
693 'again\n'))
694 scmutil.writerequires(srcrepo.vfs, requirements)
695
696 # The lock file from the old store won't be removed because nothing has a
697 # reference to its new location. So clean it up manually. Alternatively, we
698 # could update srcrepo.svfs and other variables to point to the new
699 # location. This is simpler.
700 backupvfs.unlink('store/lock')
701
702 return backuppath
703
704 def upgraderepo(ui, repo, run=False, optimize=None):
705 """Upgrade a repository in place."""
706 optimize = set(optimize or [])
707 repo = repo.unfiltered()
708
709 # Ensure the repository can be upgraded.
710 missingreqs = requiredsourcerequirements(repo) - repo.requirements
711 if missingreqs:
712 raise error.Abort(_('cannot upgrade repository; requirement '
713 'missing: %s') % _(', ').join(sorted(missingreqs)))
714
715 blockedreqs = blocksourcerequirements(repo) & repo.requirements
716 if blockedreqs:
717 raise error.Abort(_('cannot upgrade repository; unsupported source '
718 'requirement: %s') %
719 _(', ').join(sorted(blockedreqs)))
720
721 # FUTURE there is potentially a need to control the wanted requirements via
722 # command arguments or via an extension hook point.
723 newreqs = localrepo.newreporequirements(repo)
724 newreqs.update(preservedrequirements(repo))
725
726 noremovereqs = (repo.requirements - newreqs -
727 supportremovedrequirements(repo))
728 if noremovereqs:
729 raise error.Abort(_('cannot upgrade repository; requirement would be '
730 'removed: %s') % _(', ').join(sorted(noremovereqs)))
731
732 noaddreqs = (newreqs - repo.requirements -
733 allowednewrequirements(repo))
734 if noaddreqs:
735 raise error.Abort(_('cannot upgrade repository; do not support adding '
736 'requirement: %s') %
737 _(', ').join(sorted(noaddreqs)))
738
739 unsupportedreqs = newreqs - supporteddestrequirements(repo)
740 if unsupportedreqs:
741 raise error.Abort(_('cannot upgrade repository; do not support '
742 'destination requirement: %s') %
743 _(', ').join(sorted(unsupportedreqs)))
744
745 # Find and validate all improvements that can be made.
746 alloptimizations = findoptimizations(repo)
747
748 # Apply and Validate arguments.
749 optimizations = []
750 for o in alloptimizations:
751 if o.name in optimize:
752 optimizations.append(o)
753 optimize.discard(o.name)
754
755 if optimize: # anything left is unknown
756 raise error.Abort(_('unknown optimization action requested: %s') %
757 ', '.join(sorted(optimize)),
758 hint=_('run without arguments to see valid '
759 'optimizations'))
760
761 deficiencies = finddeficiencies(repo)
762 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
763 actions.extend(o for o in sorted(optimizations)
764 # determineactions could have added optimisation
765 if o not in actions)
766
767 def printrequirements():
768 ui.write(_('requirements\n'))
769 ui.write(_(' preserved: %s\n') %
770 _(', ').join(sorted(newreqs & repo.requirements)))
771
772 if repo.requirements - newreqs:
773 ui.write(_(' removed: %s\n') %
774 _(', ').join(sorted(repo.requirements - newreqs)))
775
776 if newreqs - repo.requirements:
777 ui.write(_(' added: %s\n') %
778 _(', ').join(sorted(newreqs - repo.requirements)))
779
780 ui.write('\n')
781
782 def printupgradeactions():
783 for a in actions:
784 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
785
786 if not run:
787 fromconfig = []
788 onlydefault = []
789
790 for d in deficiencies:
791 if d.fromconfig(repo):
792 fromconfig.append(d)
793 elif d.default:
794 onlydefault.append(d)
795
796 if fromconfig or onlydefault:
797
798 if fromconfig:
799 ui.write(_('repository lacks features recommended by '
800 'current config options:\n\n'))
801 for i in fromconfig:
802 ui.write('%s\n %s\n\n' % (i.name, i.description))
803
804 if onlydefault:
805 ui.write(_('repository lacks features used by the default '
806 'config options:\n\n'))
807 for i in onlydefault:
808 ui.write('%s\n %s\n\n' % (i.name, i.description))
809
810 ui.write('\n')
811 else:
812 ui.write(_('(no feature deficiencies found in existing '
813 'repository)\n'))
814
815 ui.write(_('performing an upgrade with "--run" will make the following '
816 'changes:\n\n'))
817
818 printrequirements()
819 printupgradeactions()
820
821 unusedoptimize = [i for i in alloptimizations if i not in actions]
822
823 if unusedoptimize:
824 ui.write(_('additional optimizations are available by specifying '
825 '"--optimize <name>":\n\n'))
826 for i in unusedoptimize:
827 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
828 return
829
830 # Else we're in the run=true case.
831 ui.write(_('upgrade will perform the following actions:\n\n'))
832 printrequirements()
833 printupgradeactions()
834
835 upgradeactions = [a.name for a in actions]
836
837 ui.write(_('beginning upgrade...\n'))
838 with repo.wlock(), repo.lock():
839 ui.write(_('repository locked and read-only\n'))
840 # Our strategy for upgrading the repository is to create a new,
841 # temporary repository, write data to it, then do a swap of the
842 # data. There are less heavyweight ways to do this, but it is easier
843 # to create a new repo object than to instantiate all the components
844 # (like the store) separately.
845 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
846 backuppath = None
847 try:
848 ui.write(_('creating temporary repository to stage migrated '
849 'data: %s\n') % tmppath)
850
851 # clone ui without using ui.copy because repo.ui is protected
852 repoui = repo.ui.__class__(repo.ui)
853 dstrepo = hg.repository(repoui, path=tmppath, create=True)
854
855 with dstrepo.wlock(), dstrepo.lock():
856 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
857 upgradeactions)
858
859 finally:
860 ui.write(_('removing temporary repository %s\n') % tmppath)
861 repo.vfs.rmtree(tmppath, forcibly=True)
862
863 if backuppath:
864 ui.warn(_('copy of old repository backed up at %s\n') %
865 backuppath)
866 ui.warn(_('the old repository will not be deleted; remove '
867 'it to free up disk space once the upgraded '
868 'repository is verified\n'))
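(For context: upgraderepo() above backs Mercurial's debugupgraderepo command; the optimization names registered in findoptimizations() are the values accepted by its --optimize flag, and run=True corresponds to passing --run.)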