upgrade: move descriptions and selection logic into individual classes...
Pierre-Yves David
r32031:11a2461f default
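Summary of the change: the checks that previously lived inline in finddeficiencies(), testing repo.requirements and building an improvement instance on the fly for every problem, now live on dedicated formatvariant subclasses that carry their own name, description, upgrademessage and fromrepo()/fromconfig() logic. A minimal sketch of the resulting call pattern, using the fncache variant defined in the patch (the repo object is assumed to come from the usual command context):

    # old hunk: build an improvement instance per check
    #   if 'fncache' not in repo.requirements:
    #       deficiencies.append(improvement(name='fncache', ...))
    # new hunk: the class itself carries the metadata and the check
    if not fncache.fromrepo(repo):          # requirement missing from this repo
        deficiencies.append(fncache)        # the class doubles as the improvement
    wouldhaveit = fncache.fromconfig(repo)  # True if a fresh repo would enable it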
@@ -1,761 +1,821
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11 import tempfile
11 import tempfile
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 localrepo,
17 localrepo,
18 manifest,
18 manifest,
19 revlog,
19 revlog,
20 scmutil,
20 scmutil,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
25 def requiredsourcerequirements(repo):
25 def requiredsourcerequirements(repo):
26 """Obtain requirements required to be present to upgrade a repo.
26 """Obtain requirements required to be present to upgrade a repo.
27
27
28 An upgrade will not be allowed if the repository doesn't have the
28 An upgrade will not be allowed if the repository doesn't have the
29 requirements returned by this function.
29 requirements returned by this function.
30 """
30 """
31 return set([
31 return set([
32 # Introduced in Mercurial 0.9.2.
32 # Introduced in Mercurial 0.9.2.
33 'revlogv1',
33 'revlogv1',
34 # Introduced in Mercurial 0.9.2.
34 # Introduced in Mercurial 0.9.2.
35 'store',
35 'store',
36 ])
36 ])
37
37
38 def blocksourcerequirements(repo):
38 def blocksourcerequirements(repo):
39 """Obtain requirements that will prevent an upgrade from occurring.
39 """Obtain requirements that will prevent an upgrade from occurring.
40
40
41 An upgrade cannot be performed if the source repository contains a
41 An upgrade cannot be performed if the source repository contains a
42 requirement in the returned set.
42 requirement in the returned set.
43 """
43 """
44 return set([
44 return set([
45 # The upgrade code does not yet support these experimental features.
45 # The upgrade code does not yet support these experimental features.
46 # This is an artificial limitation.
46 # This is an artificial limitation.
47 'manifestv2',
47 'manifestv2',
48 'treemanifest',
48 'treemanifest',
49 # This was a precursor to generaldelta and was never enabled by default.
49 # This was a precursor to generaldelta and was never enabled by default.
50 # It should (hopefully) not exist in the wild.
50 # It should (hopefully) not exist in the wild.
51 'parentdelta',
51 'parentdelta',
52 # Upgrade should operate on the actual store, not the shared link.
52 # Upgrade should operate on the actual store, not the shared link.
53 'shared',
53 'shared',
54 ])
54 ])
55
55
56 def supportremovedrequirements(repo):
56 def supportremovedrequirements(repo):
57 """Obtain requirements that can be removed during an upgrade.
57 """Obtain requirements that can be removed during an upgrade.
58
58
59 If an upgrade were to create a repository that dropped a requirement,
59 If an upgrade were to create a repository that dropped a requirement,
60 the dropped requirement must appear in the returned set for the upgrade
60 the dropped requirement must appear in the returned set for the upgrade
61 to be allowed.
61 to be allowed.
62 """
62 """
63 return set()
63 return set()
64
64
65 def supporteddestrequirements(repo):
65 def supporteddestrequirements(repo):
66 """Obtain requirements that upgrade supports in the destination.
66 """Obtain requirements that upgrade supports in the destination.
67
67
68 If the result of the upgrade would create requirements not in this set,
68 If the result of the upgrade would create requirements not in this set,
69 the upgrade is disallowed.
69 the upgrade is disallowed.
70
70
71 Extensions should monkeypatch this to add their custom requirements.
71 Extensions should monkeypatch this to add their custom requirements.
72 """
72 """
73 return set([
73 return set([
74 'dotencode',
74 'dotencode',
75 'fncache',
75 'fncache',
76 'generaldelta',
76 'generaldelta',
77 'revlogv1',
77 'revlogv1',
78 'store',
78 'store',
79 ])
79 ])
80
80
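The docstring above says extensions should monkeypatch supporteddestrequirements() to declare their own requirements. A hedged sketch of what that wiring might look like in a third-party extension (the extension and its 'exp-myfeature' requirement are hypothetical; wrapfunction is Mercurial's standard wrapping helper):

    from mercurial import extensions, upgrade

    def _supported(orig, repo):
        reqs = orig(repo)
        reqs.add('exp-myfeature')  # hypothetical requirement written by the extension
        return reqs

    def extsetup(ui):
        extensions.wrapfunction(upgrade, 'supporteddestrequirements', _supported)

An extension that also adds its requirement to new repos would typically wrap allowednewrequirements() below the same way; otherwise upgraderepo() refuses with 'do not support adding requirement'.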
81 def allowednewrequirements(repo):
81 def allowednewrequirements(repo):
82 """Obtain requirements that can be added to a repository during upgrade.
82 """Obtain requirements that can be added to a repository during upgrade.
83
83
84 This is used to disallow proposed requirements from being added when
84 This is used to disallow proposed requirements from being added when
85 they weren't present before.
85 they weren't present before.
86
86
87 We use a list of allowed requirement additions instead of a list of known
87 We use a list of allowed requirement additions instead of a list of known
88 bad additions because the whitelist approach is safer and will prevent
88 bad additions because the whitelist approach is safer and will prevent
89 future, unknown requirements from accidentally being added.
89 future, unknown requirements from accidentally being added.
90 """
90 """
91 return set([
91 return set([
92 'dotencode',
92 'dotencode',
93 'fncache',
93 'fncache',
94 'generaldelta',
94 'generaldelta',
95 ])
95 ])
96
96
97 deficiency = 'deficiency'
97 deficiency = 'deficiency'
98 optimisation = 'optimization'
98 optimisation = 'optimization'
99
99
100 class improvement(object):
100 class improvement(object):
101 """Represents an improvement that can be made as part of an upgrade.
101 """Represents an improvement that can be made as part of an upgrade.
102
102
103 The following attributes are defined on each instance:
103 The following attributes are defined on each instance:
104
104
105 name
105 name
106 Machine-readable string uniquely identifying this improvement. It
106 Machine-readable string uniquely identifying this improvement. It
107 will be mapped to an action later in the upgrade process.
107 will be mapped to an action later in the upgrade process.
108
108
109 type
109 type
110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
111 problem. An optimization is an action (sometimes optional) that
111 problem. An optimization is an action (sometimes optional) that
112 can be taken to further improve the state of the repository.
112 can be taken to further improve the state of the repository.
113
113
114 description
114 description
115 Message intended for humans explaining the improvement in more detail,
115 Message intended for humans explaining the improvement in more detail,
116 including the implications of it. For ``deficiency`` types, it should be
116 including the implications of it. For ``deficiency`` types, it should be
117 worded in the present tense. For ``optimisation`` types, it should be
117 worded in the present tense. For ``optimisation`` types, it should be
118 worded in the future tense.
118 worded in the future tense.
119
119
120 upgrademessage
120 upgrademessage
121 Message intended for humans explaining what an upgrade addressing this
121 Message intended for humans explaining what an upgrade addressing this
122 issue will do. Should be worded in the future tense.
122 issue will do. Should be worded in the future tense.
123 """
123 """
124 def __init__(self, name, type, description, upgrademessage):
124 def __init__(self, name, type, description, upgrademessage):
125 self.name = name
125 self.name = name
126 self.type = type
126 self.type = type
127 self.description = description
127 self.description = description
128 self.upgrademessage = upgrademessage
128 self.upgrademessage = upgrademessage
129
129
130 def __eq__(self, other):
130 def __eq__(self, other):
131 if not isinstance(other, improvement):
131 if not isinstance(other, improvement):
132 # This is what Python tells us to do
132 # This is what Python tells us to do
133 return NotImplemented
133 return NotImplemented
134 return self.name == other.name
134 return self.name == other.name
135
135
136 def __ne__(self, other):
136 def __ne__(self, other):
137 return not self == other
137 return not self == other
138
138
139 def __hash__(self):
139 def __hash__(self):
140 return hash(self.name)
140 return hash(self.name)
141
141
142 class formatvariant(improvement):
142 class formatvariant(improvement):
143 """an improvement subclass dedicated to repository format
143 """an improvement subclass dedicated to repository format"""
144 type = deficiency
145 ### The following attributes should be defined for each class:
146
147 # machine-readable string uniquely identifying this improvement. it will be
148 # mapped to an action later in the upgrade process.
149 name = None
144
150
145 extra attributes:
151 # message intended for humans explaining the improvement in more detail,
152 # including the implications of it. For ``deficiency`` types, it should be worded
153 # in the present tense.
154 description = None
155
156 # message intended for humans explaining what an upgrade addressing this
157 # issue will do. should be worded in the future tense.
158 upgrademessage = None
146
159
147 fromdefault (``deficiency`` types only)
160 # value of current Mercurial default for new repository
148 Boolean indicating whether the current (deficient) state deviates
161 default = None
149 from Mercurial's default configuration.
162
163 def __init__(self):
164 raise NotImplementedError()
165
166 @staticmethod
167 def fromrepo(repo):
168 """current value of the variant in the repository"""
169 raise NotImplementedError()
150
170
151 fromconfig (``deficiency`` types only)
171 @staticmethod
152 Boolean indicating whether the current (deficient) state deviates
172 def fromconfig(repo):
153 from the current Mercurial configuration.
173 """current value of the variant in the configuration"""
174 raise NotImplementedError()
175
176 class requirementformatvariant(formatvariant):
177 """formatvariant based on a 'requirement' name.
178
179 Many format variants are controlled by a 'requirement'. We define a small
180 subclass to factor out the common code.
154 """
181 """
155
182
156 def __init__(self, name, description, upgrademessage, fromdefault,
183 # the requirement that controls this format variant
157 fromconfig):
184 _requirement = None
158 super(formatvariant, self).__init__(name, deficiency, description,
185
159 upgrademessage)
186 @staticmethod
160 self.fromdefault = fromdefault
187 def _newreporequirements(repo):
161 self.fromconfig = fromconfig
188 return localrepo.newreporequirements(repo)
189
190 @classmethod
191 def fromrepo(cls, repo):
192 assert cls._requirement is not None
193 return cls._requirement in repo.requirements
194
195 @classmethod
196 def fromconfig(cls, repo):
197 assert cls._requirement is not None
198 return cls._requirement in cls._newreporequirements(repo)
199
200 class fncache(requirementformatvariant):
201 name = 'fncache'
202
203 _requirement = 'fncache'
204
205 default = True
206
207 description = _('long and reserved filenames may not work correctly; '
208 'repository performance is sub-optimal')
209
210 upgrademessage = _('repository will be more resilient to storing '
211 'certain paths and performance of certain '
212 'operations should be improved')
213
214 class dotencode(requirementformatvariant):
215 name = 'dotencode'
216
217 _requirement = 'dotencode'
218
219 default = True
220
221 description = _('storage of filenames beginning with a period or '
222 'space may not work correctly')
223
224 upgrademessage = _('repository will be better able to store files '
225 'beginning with a space or period')
226
227 class generaldelta(requirementformatvariant):
228 name = 'generaldelta'
229
230 _requirement = 'generaldelta'
231
232 default = True
233
234 description = _('deltas within internal storage are unable to '
235 'choose optimal revisions; repository is larger and '
236 'slower than it could be; interaction with other '
237 'repositories may require extra network and CPU '
238 'resources, making "hg push" and "hg pull" slower')
239
240 upgrademessage = _('repository storage will be able to create '
241 'optimal deltas; new repository data will be '
242 'smaller and read times should decrease; '
243 'interacting with other repositories using this '
244 'storage model should require less network and '
245 'CPU resources, making "hg push" and "hg pull" '
246 'faster')
247
248 class removecldeltachain(formatvariant):
249 name = 'removecldeltachain'
250
251 default = True
252
253 description = _('changelog storage is using deltas instead of '
254 'raw entries; changelog reading and any '
255 'operation relying on changelog data are slower '
256 'than they could be')
257
258 upgrademessage = _('changelog storage will be reformatted to '
259 'store raw entries; changelog reading will be '
260 'faster; changelog size may be reduced')
261
262 @staticmethod
263 def fromrepo(repo):
264 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
265 # changelogs with deltas.
266 cl = repo.changelog
267 chainbase = cl.chainbase
268 return all(rev == chainbase(rev) for rev in cl)
269
270 @staticmethod
271 def fromconfig(repo):
272 return True
162
273
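The requirement-backed variants above (fncache, dotencode, generaldelta) all follow the same declarative recipe, which is what the requirementformatvariant base class buys. A sketch of one more variant under this scheme (the class and its 'exp-somefeature' requirement are hypothetical, not part of this changeset):

    class somefeature(requirementformatvariant):
        name = 'somefeature'              # key matched against upgrade actions
        _requirement = 'exp-somefeature'  # requirement string that enables the feature
        default = False                   # not enabled in newly created repos
        description = _('present-tense description of the deficiency')
        upgrademessage = _('future-tense description of what the upgrade will do')

finddeficiencies() below would then gain a matching 'if not somefeature.fromrepo(repo)' check.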
163 def finddeficiencies(repo):
274 def finddeficiencies(repo):
164 """returns a list of deficiencies that the repo suffers from"""
275 """returns a list of deficiencies that the repo suffers from"""
165 newreporeqs = localrepo.newreporequirements(repo)
166
167 deficiencies = []
276 deficiencies = []
168
277
169 # We could detect lack of revlogv1 and store here, but they were added
278 # We could detect lack of revlogv1 and store here, but they were added
170 # in 0.9.2 and we don't support upgrading repos without these
279 # in 0.9.2 and we don't support upgrading repos without these
171 # requirements, so let's not bother.
280 # requirements, so let's not bother.
172
281
173 if 'fncache' not in repo.requirements:
282 if not fncache.fromrepo(repo):
174 deficiencies.append(formatvariant(
283 deficiencies.append(fncache)
175 name='fncache',
284 if not dotencode.fromrepo(repo):
176 description=_('long and reserved filenames may not work correctly; '
285 deficiencies.append(dotencode)
177 'repository performance is sub-optimal'),
286 if not generaldelta.fromrepo(repo):
178 upgrademessage=_('repository will be more resilient to storing '
287 deficiencies.append(generaldelta)
179 'certain paths and performance of certain '
288 if not removecldeltachain.fromrepo(repo):
180 'operations should be improved'),
289 deficiencies.append(removecldeltachain)
181 fromdefault=True,
182 fromconfig='fncache' in newreporeqs))
183
184 if 'dotencode' not in repo.requirements:
185 deficiencies.append(formatvariant(
186 name='dotencode',
187 description=_('storage of filenames beginning with a period or '
188 'space may not work correctly'),
189 upgrademessage=_('repository will be better able to store files '
190 'beginning with a space or period'),
191 fromdefault=True,
192 fromconfig='dotencode' in newreporeqs))
193
194 if 'generaldelta' not in repo.requirements:
195 deficiencies.append(formatvariant(
196 name='generaldelta',
197 description=_('deltas within internal storage are unable to '
198 'choose optimal revisions; repository is larger and '
199 'slower than it could be; interaction with other '
200 'repositories may require extra network and CPU '
201 'resources, making "hg push" and "hg pull" slower'),
202 upgrademessage=_('repository storage will be able to create '
203 'optimal deltas; new repository data will be '
204 'smaller and read times should decrease; '
205 'interacting with other repositories using this '
206 'storage model should require less network and '
207 'CPU resources, making "hg push" and "hg pull" '
208 'faster'),
209 fromdefault=True,
210 fromconfig='generaldelta' in newreporeqs))
211
212 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
213 # changelogs with deltas.
214 cl = repo.changelog
215 for rev in cl:
216 chainbase = cl.chainbase(rev)
217 if chainbase != rev:
218 deficiencies.append(formatvariant(
219 name='removecldeltachain',
220 description=_('changelog storage is using deltas instead of '
221 'raw entries; changelog reading and any '
222 'operation relying on changelog data are slower '
223 'than they could be'),
224 upgrademessage=_('changelog storage will be reformatted to '
225 'store raw entries; changelog reading will be '
226 'faster; changelog size may be reduced'),
227 fromdefault=True,
228 fromconfig=True))
229 break
230
290
231 return deficiencies
291 return deficiencies
232
292
233 def findoptimizations(repo):
293 def findoptimizations(repo):
234 """Determine optimisations that could be used during upgrade"""
294 """Determine optimisations that could be used during upgrade"""
235 # These are unconditionally added. There is logic later that figures out
295 # These are unconditionally added. There is logic later that figures out
236 # which ones to apply.
296 # which ones to apply.
237 optimizations = []
297 optimizations = []
238
298
239 optimizations.append(improvement(
299 optimizations.append(improvement(
240 name='redeltaparent',
300 name='redeltaparent',
241 type=optimisation,
301 type=optimisation,
242 description=_('deltas within internal storage will be recalculated to '
302 description=_('deltas within internal storage will be recalculated to '
243 'choose an optimal base revision where this was not '
303 'choose an optimal base revision where this was not '
244 'already done; the size of the repository may shrink and '
304 'already done; the size of the repository may shrink and '
245 'various operations may become faster; the first time '
305 'various operations may become faster; the first time '
246 'this optimization is performed could slow down upgrade '
306 'this optimization is performed could slow down upgrade '
247 'execution considerably; subsequent invocations should '
307 'execution considerably; subsequent invocations should '
248 'not run noticeably slower'),
308 'not run noticeably slower'),
249 upgrademessage=_('deltas within internal storage will choose a new '
309 upgrademessage=_('deltas within internal storage will choose a new '
250 'base revision if needed')))
310 'base revision if needed')))
251
311
252 optimizations.append(improvement(
312 optimizations.append(improvement(
253 name='redeltamultibase',
313 name='redeltamultibase',
254 type=optimisation,
314 type=optimisation,
255 description=_('deltas within internal storage will be recalculated '
315 description=_('deltas within internal storage will be recalculated '
256 'against multiple base revisions and the smallest '
316 'against multiple base revisions and the smallest '
257 'difference will be used; the size of the repository may '
317 'difference will be used; the size of the repository may '
258 'shrink significantly when there are many merges; this '
318 'shrink significantly when there are many merges; this '
259 'optimization will slow down execution in proportion to '
319 'optimization will slow down execution in proportion to '
260 'the number of merges in the repository and the number '
320 'the number of merges in the repository and the number '
261 'of files in the repository; this slowdown should not '
321 'of files in the repository; this slowdown should not '
262 'be significant unless there are tens of thousands of '
322 'be significant unless there are tens of thousands of '
263 'files and thousands of merges'),
323 'files and thousands of merges'),
264 upgrademessage=_('deltas within internal storage will choose an '
324 upgrademessage=_('deltas within internal storage will choose an '
265 'optimal delta by computing deltas against multiple '
325 'optimal delta by computing deltas against multiple '
266 'parents; may slow down execution time '
326 'parents; may slow down execution time '
267 'significantly')))
327 'significantly')))
268
328
269 optimizations.append(improvement(
329 optimizations.append(improvement(
270 name='redeltaall',
330 name='redeltaall',
271 type=optimisation,
331 type=optimisation,
272 description=_('deltas within internal storage will always be '
332 description=_('deltas within internal storage will always be '
273 'recalculated without reusing prior deltas; this will '
333 'recalculated without reusing prior deltas; this will '
274 'likely make execution run several times slower; this '
334 'likely make execution run several times slower; this '
275 'optimization is typically not needed'),
335 'optimization is typically not needed'),
276 upgrademessage=_('deltas within internal storage will be fully '
336 upgrademessage=_('deltas within internal storage will be fully '
277 'recomputed; this will likely drastically slow down '
337 'recomputed; this will likely drastically slow down '
278 'execution time')))
338 'execution time')))
279
339
280 return optimizations
340 return optimizations
281
341
282 def determineactions(repo, deficiencies, sourcereqs, destreqs):
342 def determineactions(repo, deficiencies, sourcereqs, destreqs):
283 """Determine upgrade actions that will be performed.
343 """Determine upgrade actions that will be performed.
284
344
285 Given a list of improvements as returned by ``finddeficiencies`` and
345 Given a list of improvements as returned by ``finddeficiencies`` and
286 ``findoptimizations``, determine the list of upgrade actions that
346 ``findoptimizations``, determine the list of upgrade actions that
287 will be performed.
347 will be performed.
288
348
289 The role of this function is to filter improvements if needed, apply
349 The role of this function is to filter improvements if needed, apply
290 recommended optimizations from the improvements list that make sense,
350 recommended optimizations from the improvements list that make sense,
291 etc.
351 etc.
292
352
293 Returns a list of action names.
353 Returns a list of action names.
294 """
354 """
295 newactions = []
355 newactions = []
296
356
297 knownreqs = supporteddestrequirements(repo)
357 knownreqs = supporteddestrequirements(repo)
298
358
299 for d in deficiencies:
359 for d in deficiencies:
300 name = d.name
360 name = d.name
301
361
302 # If the action is a requirement that doesn't show up in the
362 # If the action is a requirement that doesn't show up in the
303 # destination requirements, prune the action.
363 # destination requirements, prune the action.
304 if name in knownreqs and name not in destreqs:
364 if name in knownreqs and name not in destreqs:
305 continue
365 continue
306
366
307 newactions.append(d)
367 newactions.append(d)
308
368
309 # FUTURE consider adding some optimizations here for certain transitions.
369 # FUTURE consider adding some optimizations here for certain transitions.
310 # e.g. adding generaldelta could schedule parent redeltas.
370 # e.g. adding generaldelta could schedule parent redeltas.
311
371
312 return newactions
372 return newactions
313
373
314 def _revlogfrompath(repo, path):
374 def _revlogfrompath(repo, path):
315 """Obtain a revlog from a repo path.
375 """Obtain a revlog from a repo path.
316
376
317 An instance of the appropriate class is returned.
377 An instance of the appropriate class is returned.
318 """
378 """
319 if path == '00changelog.i':
379 if path == '00changelog.i':
320 return changelog.changelog(repo.svfs)
380 return changelog.changelog(repo.svfs)
321 elif path.endswith('00manifest.i'):
381 elif path.endswith('00manifest.i'):
322 mandir = path[:-len('00manifest.i')]
382 mandir = path[:-len('00manifest.i')]
323 return manifest.manifestrevlog(repo.svfs, dir=mandir)
383 return manifest.manifestrevlog(repo.svfs, dir=mandir)
324 else:
384 else:
325 # Filelogs don't do anything special with settings. So we can use a
385 # Filelogs don't do anything special with settings. So we can use a
326 # vanilla revlog.
386 # vanilla revlog.
327 return revlog.revlog(repo.svfs, path)
387 return revlog.revlog(repo.svfs, path)
328
388
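_revlogfrompath() dispatches purely on the store path, which is what lets _copyrevlogs() below treat changelog, manifests and filelogs uniformly. Illustrative calls, assuming a repo object and store paths of the usual shape:

    _revlogfrompath(repo, '00changelog.i')   # changelog.changelog
    _revlogfrompath(repo, '00manifest.i')    # manifest.manifestrevlog for the root (dir='')
    _revlogfrompath(repo, 'data/foo.txt.i')  # plain revlog.revlog for a filelog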
329 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
389 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
330 """Copy revlogs between 2 repos."""
390 """Copy revlogs between 2 repos."""
331 revcount = 0
391 revcount = 0
332 srcsize = 0
392 srcsize = 0
333 srcrawsize = 0
393 srcrawsize = 0
334 dstsize = 0
394 dstsize = 0
335 fcount = 0
395 fcount = 0
336 frevcount = 0
396 frevcount = 0
337 fsrcsize = 0
397 fsrcsize = 0
338 frawsize = 0
398 frawsize = 0
339 fdstsize = 0
399 fdstsize = 0
340 mcount = 0
400 mcount = 0
341 mrevcount = 0
401 mrevcount = 0
342 msrcsize = 0
402 msrcsize = 0
343 mrawsize = 0
403 mrawsize = 0
344 mdstsize = 0
404 mdstsize = 0
345 crevcount = 0
405 crevcount = 0
346 csrcsize = 0
406 csrcsize = 0
347 crawsize = 0
407 crawsize = 0
348 cdstsize = 0
408 cdstsize = 0
349
409
350 # Perform a pass to collect metadata. This validates we can open all
410 # Perform a pass to collect metadata. This validates we can open all
351 # source files and allows a unified progress bar to be displayed.
411 # source files and allows a unified progress bar to be displayed.
352 for unencoded, encoded, size in srcrepo.store.walk():
412 for unencoded, encoded, size in srcrepo.store.walk():
353 if unencoded.endswith('.d'):
413 if unencoded.endswith('.d'):
354 continue
414 continue
355
415
356 rl = _revlogfrompath(srcrepo, unencoded)
416 rl = _revlogfrompath(srcrepo, unencoded)
357 revcount += len(rl)
417 revcount += len(rl)
358
418
359 datasize = 0
419 datasize = 0
360 rawsize = 0
420 rawsize = 0
361 idx = rl.index
421 idx = rl.index
362 for rev in rl:
422 for rev in rl:
363 e = idx[rev]
423 e = idx[rev]
364 datasize += e[1]
424 datasize += e[1]
365 rawsize += e[2]
425 rawsize += e[2]
366
426
367 srcsize += datasize
427 srcsize += datasize
368 srcrawsize += rawsize
428 srcrawsize += rawsize
369
429
370 # This is for the separate progress bars.
430 # This is for the separate progress bars.
371 if isinstance(rl, changelog.changelog):
431 if isinstance(rl, changelog.changelog):
372 crevcount += len(rl)
432 crevcount += len(rl)
373 csrcsize += datasize
433 csrcsize += datasize
374 crawsize += rawsize
434 crawsize += rawsize
375 elif isinstance(rl, manifest.manifestrevlog):
435 elif isinstance(rl, manifest.manifestrevlog):
376 mcount += 1
436 mcount += 1
377 mrevcount += len(rl)
437 mrevcount += len(rl)
378 msrcsize += datasize
438 msrcsize += datasize
379 mrawsize += rawsize
439 mrawsize += rawsize
380 elif isinstance(rl, revlog.revlog):
440 elif isinstance(rl, revlog.revlog):
381 fcount += 1
441 fcount += 1
382 frevcount += len(rl)
442 frevcount += len(rl)
383 fsrcsize += datasize
443 fsrcsize += datasize
384 frawsize += rawsize
444 frawsize += rawsize
385
445
386 if not revcount:
446 if not revcount:
387 return
447 return
388
448
389 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
449 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
390 '%d in changelog)\n') %
450 '%d in changelog)\n') %
391 (revcount, frevcount, mrevcount, crevcount))
451 (revcount, frevcount, mrevcount, crevcount))
392 ui.write(_('migrating %s in store; %s tracked data\n') % (
452 ui.write(_('migrating %s in store; %s tracked data\n') % (
393 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
453 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
394
454
395 # Used to keep track of progress.
455 # Used to keep track of progress.
396 progress = []
456 progress = []
397 def oncopiedrevision(rl, rev, node):
457 def oncopiedrevision(rl, rev, node):
398 progress[1] += 1
458 progress[1] += 1
399 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
459 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
400
460
401 # Do the actual copying.
461 # Do the actual copying.
402 # FUTURE this operation can be farmed off to worker processes.
462 # FUTURE this operation can be farmed off to worker processes.
403 seen = set()
463 seen = set()
404 for unencoded, encoded, size in srcrepo.store.walk():
464 for unencoded, encoded, size in srcrepo.store.walk():
405 if unencoded.endswith('.d'):
465 if unencoded.endswith('.d'):
406 continue
466 continue
407
467
408 oldrl = _revlogfrompath(srcrepo, unencoded)
468 oldrl = _revlogfrompath(srcrepo, unencoded)
409 newrl = _revlogfrompath(dstrepo, unencoded)
469 newrl = _revlogfrompath(dstrepo, unencoded)
410
470
411 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
471 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
412 ui.write(_('finished migrating %d manifest revisions across %d '
472 ui.write(_('finished migrating %d manifest revisions across %d '
413 'manifests; change in size: %s\n') %
473 'manifests; change in size: %s\n') %
414 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
474 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
415
475
416 ui.write(_('migrating changelog containing %d revisions '
476 ui.write(_('migrating changelog containing %d revisions '
417 '(%s in store; %s tracked data)\n') %
477 '(%s in store; %s tracked data)\n') %
418 (crevcount, util.bytecount(csrcsize),
478 (crevcount, util.bytecount(csrcsize),
419 util.bytecount(crawsize)))
479 util.bytecount(crawsize)))
420 seen.add('c')
480 seen.add('c')
421 progress[:] = [_('changelog revisions'), 0, crevcount]
481 progress[:] = [_('changelog revisions'), 0, crevcount]
422 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
482 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
423 ui.write(_('finished migrating %d filelog revisions across %d '
483 ui.write(_('finished migrating %d filelog revisions across %d '
424 'filelogs; change in size: %s\n') %
484 'filelogs; change in size: %s\n') %
425 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
485 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
426
486
427 ui.write(_('migrating %d manifests containing %d revisions '
487 ui.write(_('migrating %d manifests containing %d revisions '
428 '(%s in store; %s tracked data)\n') %
488 '(%s in store; %s tracked data)\n') %
429 (mcount, mrevcount, util.bytecount(msrcsize),
489 (mcount, mrevcount, util.bytecount(msrcsize),
430 util.bytecount(mrawsize)))
490 util.bytecount(mrawsize)))
431 seen.add('m')
491 seen.add('m')
432 progress[:] = [_('manifest revisions'), 0, mrevcount]
492 progress[:] = [_('manifest revisions'), 0, mrevcount]
433 elif 'f' not in seen:
493 elif 'f' not in seen:
434 ui.write(_('migrating %d filelogs containing %d revisions '
494 ui.write(_('migrating %d filelogs containing %d revisions '
435 '(%s in store; %s tracked data)\n') %
495 '(%s in store; %s tracked data)\n') %
436 (fcount, frevcount, util.bytecount(fsrcsize),
496 (fcount, frevcount, util.bytecount(fsrcsize),
437 util.bytecount(frawsize)))
497 util.bytecount(frawsize)))
438 seen.add('f')
498 seen.add('f')
439 progress[:] = [_('file revisions'), 0, frevcount]
499 progress[:] = [_('file revisions'), 0, frevcount]
440
500
441 ui.progress(progress[0], progress[1], total=progress[2])
501 ui.progress(progress[0], progress[1], total=progress[2])
442
502
443 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
503 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
444 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
504 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
445 deltareuse=deltareuse,
505 deltareuse=deltareuse,
446 aggressivemergedeltas=aggressivemergedeltas)
506 aggressivemergedeltas=aggressivemergedeltas)
447
507
448 datasize = 0
508 datasize = 0
449 idx = newrl.index
509 idx = newrl.index
450 for rev in newrl:
510 for rev in newrl:
451 datasize += idx[rev][1]
511 datasize += idx[rev][1]
452
512
453 dstsize += datasize
513 dstsize += datasize
454
514
455 if isinstance(newrl, changelog.changelog):
515 if isinstance(newrl, changelog.changelog):
456 cdstsize += datasize
516 cdstsize += datasize
457 elif isinstance(newrl, manifest.manifestrevlog):
517 elif isinstance(newrl, manifest.manifestrevlog):
458 mdstsize += datasize
518 mdstsize += datasize
459 else:
519 else:
460 fdstsize += datasize
520 fdstsize += datasize
461
521
462 ui.progress(progress[0], None)
522 ui.progress(progress[0], None)
463
523
464 ui.write(_('finished migrating %d changelog revisions; change in size: '
524 ui.write(_('finished migrating %d changelog revisions; change in size: '
465 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
525 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
466
526
467 ui.write(_('finished migrating %d total revisions; total change in store '
527 ui.write(_('finished migrating %d total revisions; total change in store '
468 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
528 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
469
529
470 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
530 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
471 """Determine whether to copy a store file during upgrade.
531 """Determine whether to copy a store file during upgrade.
472
532
473 This function is called when migrating store files from ``srcrepo`` to
533 This function is called when migrating store files from ``srcrepo`` to
474 ``dstrepo`` as part of upgrading a repository.
534 ``dstrepo`` as part of upgrading a repository.
475
535
476 Args:
536 Args:
477 srcrepo: repo we are copying from
537 srcrepo: repo we are copying from
478 dstrepo: repo we are copying to
538 dstrepo: repo we are copying to
479 requirements: set of requirements for ``dstrepo``
539 requirements: set of requirements for ``dstrepo``
480 path: store file being examined
540 path: store file being examined
481 mode: the ``ST_MODE`` file type of ``path``
541 mode: the ``ST_MODE`` file type of ``path``
482 st: ``stat`` data structure for ``path``
542 st: ``stat`` data structure for ``path``
483
543
484 Function should return ``True`` if the file is to be copied.
544 Function should return ``True`` if the file is to be copied.
485 """
545 """
486 # Skip revlogs.
546 # Skip revlogs.
487 if path.endswith(('.i', '.d')):
547 if path.endswith(('.i', '.d')):
488 return False
548 return False
489 # Skip transaction related files.
549 # Skip transaction related files.
490 if path.startswith('undo'):
550 if path.startswith('undo'):
491 return False
551 return False
492 # Only copy regular files.
552 # Only copy regular files.
493 if mode != stat.S_IFREG:
553 if mode != stat.S_IFREG:
494 return False
554 return False
495 # Skip other skipped files.
555 # Skip other skipped files.
496 if path in ('lock', 'fncache'):
556 if path in ('lock', 'fncache'):
497 return False
557 return False
498
558
499 return True
559 return True
500
560
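Concretely, for regular store files (stat.S_IFREG) the filter above yields decisions like the following; files that pass are copied byte-for-byte by _upgraderepo() below:

    # '00changelog.i'    -> False  (revlog: rewritten by _copyrevlogs, not copied)
    # 'undo.backupfiles' -> False  (transaction leftovers are not carried over)
    # 'fncache'          -> False  (the destination store maintains its own fncache)
    # 'lock'             -> False  (skipped explicitly)
    # 'phaseroots'       -> True   (ordinary store file, copied as-is)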
501 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
561 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
502 """Hook point for extensions to perform additional actions during upgrade.
562 """Hook point for extensions to perform additional actions during upgrade.
503
563
504 This function is called after revlogs and store files have been copied but
564 This function is called after revlogs and store files have been copied but
505 before the new store is swapped into the original location.
565 before the new store is swapped into the original location.
506 """
566 """
507
567
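Because _finishdatamigration() is an empty hook point, extensions get a window in which both repositories are still locked and the new store has not yet been swapped into place. A hedged sketch of an extension-side wrapper (the 'myindex' sidecar file is hypothetical; the wrapfunction wiring is the same as in the earlier sketch):

    def _finishupgrade(orig, ui, srcrepo, dstrepo, requirements):
        orig(ui, srcrepo, dstrepo, requirements)
        # hypothetical: write an extension-maintained sidecar into the new store
        # so it matches the freshly rewritten revlogs
        with dstrepo.store.vfs('myindex', 'wb') as fp:
            fp.write(b'rebuilt during upgrade\n')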
508 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
568 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
509 """Do the low-level work of upgrading a repository.
569 """Do the low-level work of upgrading a repository.
510
570
511 The upgrade is effectively performed as a copy between a source
571 The upgrade is effectively performed as a copy between a source
512 repository and a temporary destination repository.
572 repository and a temporary destination repository.
513
573
514 The source repository is unmodified for as long as possible so the
574 The source repository is unmodified for as long as possible so the
515 upgrade can abort at any time without causing loss of service for
575 upgrade can abort at any time without causing loss of service for
516 readers and without corrupting the source repository.
576 readers and without corrupting the source repository.
517 """
577 """
518 assert srcrepo.currentwlock()
578 assert srcrepo.currentwlock()
519 assert dstrepo.currentwlock()
579 assert dstrepo.currentwlock()
520
580
521 ui.write(_('(it is safe to interrupt this process any time before '
581 ui.write(_('(it is safe to interrupt this process any time before '
522 'data migration completes)\n'))
582 'data migration completes)\n'))
523
583
524 if 'redeltaall' in actions:
584 if 'redeltaall' in actions:
525 deltareuse = revlog.revlog.DELTAREUSENEVER
585 deltareuse = revlog.revlog.DELTAREUSENEVER
526 elif 'redeltaparent' in actions:
586 elif 'redeltaparent' in actions:
527 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
587 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
528 elif 'redeltamultibase' in actions:
588 elif 'redeltamultibase' in actions:
529 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
589 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
530 else:
590 else:
531 deltareuse = revlog.revlog.DELTAREUSEALWAYS
591 deltareuse = revlog.revlog.DELTAREUSEALWAYS
532
592
533 with dstrepo.transaction('upgrade') as tr:
593 with dstrepo.transaction('upgrade') as tr:
534 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
594 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
535 'redeltamultibase' in actions)
595 'redeltamultibase' in actions)
536
596
537 # Now copy other files in the store directory.
597 # Now copy other files in the store directory.
538 # The sorted() makes execution deterministic.
598 # The sorted() makes execution deterministic.
539 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
599 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
540 if not _filterstorefile(srcrepo, dstrepo, requirements,
600 if not _filterstorefile(srcrepo, dstrepo, requirements,
541 p, kind, st):
601 p, kind, st):
542 continue
602 continue
543
603
544 srcrepo.ui.write(_('copying %s\n') % p)
604 srcrepo.ui.write(_('copying %s\n') % p)
545 src = srcrepo.store.rawvfs.join(p)
605 src = srcrepo.store.rawvfs.join(p)
546 dst = dstrepo.store.rawvfs.join(p)
606 dst = dstrepo.store.rawvfs.join(p)
547 util.copyfile(src, dst, copystat=True)
607 util.copyfile(src, dst, copystat=True)
548
608
549 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
609 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
550
610
551 ui.write(_('data fully migrated to temporary repository\n'))
611 ui.write(_('data fully migrated to temporary repository\n'))
552
612
553 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
613 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
554 backupvfs = vfsmod.vfs(backuppath)
614 backupvfs = vfsmod.vfs(backuppath)
555
615
556 # Make a backup of requires file first, as it is the first to be modified.
616 # Make a backup of requires file first, as it is the first to be modified.
557 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
617 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
558
618
559 # We install an arbitrary requirement that clients must not support
619 # We install an arbitrary requirement that clients must not support
560 # as a mechanism to lock out new clients during the data swap. This is
620 # as a mechanism to lock out new clients during the data swap. This is
561 # better than allowing a client to continue while the repository is in
621 # better than allowing a client to continue while the repository is in
562 # an inconsistent state.
622 # an inconsistent state.
563 ui.write(_('marking source repository as being upgraded; clients will be '
623 ui.write(_('marking source repository as being upgraded; clients will be '
564 'unable to read from repository\n'))
624 'unable to read from repository\n'))
565 scmutil.writerequires(srcrepo.vfs,
625 scmutil.writerequires(srcrepo.vfs,
566 srcrepo.requirements | set(['upgradeinprogress']))
626 srcrepo.requirements | set(['upgradeinprogress']))
567
627
568 ui.write(_('starting in-place swap of repository data\n'))
628 ui.write(_('starting in-place swap of repository data\n'))
569 ui.write(_('replaced files will be backed up at %s\n') %
629 ui.write(_('replaced files will be backed up at %s\n') %
570 backuppath)
630 backuppath)
571
631
572 # Now swap in the new store directory. Doing it as a rename should make
632 # Now swap in the new store directory. Doing it as a rename should make
573 # the operation nearly instantaneous and atomic (at least in well-behaved
633 # the operation nearly instantaneous and atomic (at least in well-behaved
574 # environments).
634 # environments).
575 ui.write(_('replacing store...\n'))
635 ui.write(_('replacing store...\n'))
576 tstart = util.timer()
636 tstart = util.timer()
577 util.rename(srcrepo.spath, backupvfs.join('store'))
637 util.rename(srcrepo.spath, backupvfs.join('store'))
578 util.rename(dstrepo.spath, srcrepo.spath)
638 util.rename(dstrepo.spath, srcrepo.spath)
579 elapsed = util.timer() - tstart
639 elapsed = util.timer() - tstart
580 ui.write(_('store replacement complete; repository was inconsistent for '
640 ui.write(_('store replacement complete; repository was inconsistent for '
581 '%0.1fs\n') % elapsed)
641 '%0.1fs\n') % elapsed)
582
642
583 # We first write the requirements file. Any new requirements will lock
643 # We first write the requirements file. Any new requirements will lock
584 # out legacy clients.
644 # out legacy clients.
585 ui.write(_('finalizing requirements file and making repository readable '
645 ui.write(_('finalizing requirements file and making repository readable '
586 'again\n'))
646 'again\n'))
587 scmutil.writerequires(srcrepo.vfs, requirements)
647 scmutil.writerequires(srcrepo.vfs, requirements)
588
648
589 # The lock file from the old store won't be removed because nothing has a
649 # The lock file from the old store won't be removed because nothing has a
590 # reference to its new location. So clean it up manually. Alternatively, we
650 # reference to its new location. So clean it up manually. Alternatively, we
591 # could update srcrepo.svfs and other variables to point to the new
651 # could update srcrepo.svfs and other variables to point to the new
592 # location. This is simpler.
652 # location. This is simpler.
593 backupvfs.unlink('store/lock')
653 backupvfs.unlink('store/lock')
594
654
595 return backuppath
655 return backuppath
596
656
597 def upgraderepo(ui, repo, run=False, optimize=None):
657 def upgraderepo(ui, repo, run=False, optimize=None):
598 """Upgrade a repository in place."""
658 """Upgrade a repository in place."""
599 optimize = set(optimize or [])
659 optimize = set(optimize or [])
600 repo = repo.unfiltered()
660 repo = repo.unfiltered()
601
661
602 # Ensure the repository can be upgraded.
662 # Ensure the repository can be upgraded.
603 missingreqs = requiredsourcerequirements(repo) - repo.requirements
663 missingreqs = requiredsourcerequirements(repo) - repo.requirements
604 if missingreqs:
664 if missingreqs:
605 raise error.Abort(_('cannot upgrade repository; requirement '
665 raise error.Abort(_('cannot upgrade repository; requirement '
606 'missing: %s') % _(', ').join(sorted(missingreqs)))
666 'missing: %s') % _(', ').join(sorted(missingreqs)))
607
667
608 blockedreqs = blocksourcerequirements(repo) & repo.requirements
668 blockedreqs = blocksourcerequirements(repo) & repo.requirements
609 if blockedreqs:
669 if blockedreqs:
610 raise error.Abort(_('cannot upgrade repository; unsupported source '
670 raise error.Abort(_('cannot upgrade repository; unsupported source '
611 'requirement: %s') %
671 'requirement: %s') %
612 _(', ').join(sorted(blockedreqs)))
672 _(', ').join(sorted(blockedreqs)))
613
673
614 # FUTURE there is potentially a need to control the wanted requirements via
674 # FUTURE there is potentially a need to control the wanted requirements via
615 # command arguments or via an extension hook point.
675 # command arguments or via an extension hook point.
616 newreqs = localrepo.newreporequirements(repo)
676 newreqs = localrepo.newreporequirements(repo)
617
677
618 noremovereqs = (repo.requirements - newreqs -
678 noremovereqs = (repo.requirements - newreqs -
619 supportremovedrequirements(repo))
679 supportremovedrequirements(repo))
620 if noremovereqs:
680 if noremovereqs:
621 raise error.Abort(_('cannot upgrade repository; requirement would be '
681 raise error.Abort(_('cannot upgrade repository; requirement would be '
622 'removed: %s') % _(', ').join(sorted(noremovereqs)))
682 'removed: %s') % _(', ').join(sorted(noremovereqs)))
623
683
624 noaddreqs = (newreqs - repo.requirements -
684 noaddreqs = (newreqs - repo.requirements -
625 allowednewrequirements(repo))
685 allowednewrequirements(repo))
626 if noaddreqs:
686 if noaddreqs:
627 raise error.Abort(_('cannot upgrade repository; do not support adding '
687 raise error.Abort(_('cannot upgrade repository; do not support adding '
628 'requirement: %s') %
688 'requirement: %s') %
629 _(', ').join(sorted(noaddreqs)))
689 _(', ').join(sorted(noaddreqs)))
630
690
631 unsupportedreqs = newreqs - supporteddestrequirements(repo)
691 unsupportedreqs = newreqs - supporteddestrequirements(repo)
632 if unsupportedreqs:
692 if unsupportedreqs:
633 raise error.Abort(_('cannot upgrade repository; do not support '
693 raise error.Abort(_('cannot upgrade repository; do not support '
634 'destination requirement: %s') %
694 'destination requirement: %s') %
635 _(', ').join(sorted(unsupportedreqs)))
695 _(', ').join(sorted(unsupportedreqs)))
636
696
637 # Find and validate all improvements that can be made.
697 # Find and validate all improvements that can be made.
638 alloptimizations = findoptimizations(repo)
698 alloptimizations = findoptimizations(repo)
639
699
640 # Apply and validate arguments.
700 # Apply and validate arguments.
641 optimizations = []
701 optimizations = []
642 for o in alloptimizations:
702 for o in alloptimizations:
643 if o.name in optimize:
703 if o.name in optimize:
644 optimizations.append(o)
704 optimizations.append(o)
645 optimize.discard(o.name)
705 optimize.discard(o.name)
646
706
647 if optimize: # anything left is unknown
707 if optimize: # anything left is unknown
648 raise error.Abort(_('unknown optimization action requested: %s') %
708 raise error.Abort(_('unknown optimization action requested: %s') %
649 ', '.join(sorted(optimize)),
709 ', '.join(sorted(optimize)),
650 hint=_('run without arguments to see valid '
710 hint=_('run without arguments to see valid '
651 'optimizations'))
711 'optimizations'))
652
712
653 deficiencies = finddeficiencies(repo)
713 deficiencies = finddeficiencies(repo)
654 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
714 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
655 actions.extend(o for o in sorted(optimizations)
715 actions.extend(o for o in sorted(optimizations)
656 # determineactions could have added optimisations
716 # determineactions could have added optimisations
657 if o not in actions)
717 if o not in actions)
658
718
659 def printrequirements():
719 def printrequirements():
660 ui.write(_('requirements\n'))
720 ui.write(_('requirements\n'))
661 ui.write(_(' preserved: %s\n') %
721 ui.write(_(' preserved: %s\n') %
662 _(', ').join(sorted(newreqs & repo.requirements)))
722 _(', ').join(sorted(newreqs & repo.requirements)))
663
723
664 if repo.requirements - newreqs:
724 if repo.requirements - newreqs:
665 ui.write(_(' removed: %s\n') %
725 ui.write(_(' removed: %s\n') %
666 _(', ').join(sorted(repo.requirements - newreqs)))
726 _(', ').join(sorted(repo.requirements - newreqs)))
667
727
668 if newreqs - repo.requirements:
728 if newreqs - repo.requirements:
669 ui.write(_(' added: %s\n') %
729 ui.write(_(' added: %s\n') %
670 _(', ').join(sorted(newreqs - repo.requirements)))
730 _(', ').join(sorted(newreqs - repo.requirements)))
671
731
672 ui.write('\n')
732 ui.write('\n')
673
733
674 def printupgradeactions():
734 def printupgradeactions():
675 for a in actions:
735 for a in actions:
676 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
736 ui.write('%s\n %s\n\n' % (a.name, a.upgrademessage))
677
737
678 if not run:
738 if not run:
679 fromconfig = []
739 fromconfig = []
680 onlydefault = []
740 onlydefault = []
681
741
682 for d in deficiencies:
742 for d in deficiencies:
683 if d.fromconfig:
743 if d.fromconfig(repo):
684 fromconfig.append(d)
744 fromconfig.append(d)
685 elif d.fromdefault:
745 elif d.default:
686 onlydefault.append(d)
746 onlydefault.append(d)
687
747
688 if fromconfig or onlydefault:
748 if fromconfig or onlydefault:
689
749
690 if fromconfig:
750 if fromconfig:
691 ui.write(_('repository lacks features recommended by '
751 ui.write(_('repository lacks features recommended by '
692 'current config options:\n\n'))
752 'current config options:\n\n'))
693 for i in fromconfig:
753 for i in fromconfig:
694 ui.write('%s\n %s\n\n' % (i.name, i.description))
754 ui.write('%s\n %s\n\n' % (i.name, i.description))
695
755
696 if onlydefault:
756 if onlydefault:
697 ui.write(_('repository lacks features used by the default '
757 ui.write(_('repository lacks features used by the default '
698 'config options:\n\n'))
758 'config options:\n\n'))
699 for i in onlydefault:
759 for i in onlydefault:
700 ui.write('%s\n %s\n\n' % (i.name, i.description))
760 ui.write('%s\n %s\n\n' % (i.name, i.description))
701
761
702 ui.write('\n')
762 ui.write('\n')
703 else:
763 else:
704 ui.write(_('(no feature deficiencies found in existing '
764 ui.write(_('(no feature deficiencies found in existing '
705 'repository)\n'))
765 'repository)\n'))
706
766
707 ui.write(_('performing an upgrade with "--run" will make the following '
767 ui.write(_('performing an upgrade with "--run" will make the following '
708 'changes:\n\n'))
768 'changes:\n\n'))
709
769
710 printrequirements()
770 printrequirements()
711 printupgradeactions()
771 printupgradeactions()
712
772
713 unusedoptimize = [i for i in alloptimizations if i not in actions]
773 unusedoptimize = [i for i in alloptimizations if i not in actions]
714
774
715 if unusedoptimize:
775 if unusedoptimize:
716 ui.write(_('additional optimizations are available by specifying '
776 ui.write(_('additional optimizations are available by specifying '
717 '"--optimize <name>":\n\n'))
777 '"--optimize <name>":\n\n'))
718 for i in unusedoptimize:
778 for i in unusedoptimize:
719 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
779 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
720 return
780 return
721
781
722 # Else we're in the run=true case.
782 # Else we're in the run=true case.
723 ui.write(_('upgrade will perform the following actions:\n\n'))
783 ui.write(_('upgrade will perform the following actions:\n\n'))
724 printrequirements()
784 printrequirements()
725 printupgradeactions()
785 printupgradeactions()
726
786
727 upgradeactions = [a.name for a in actions]
787 upgradeactions = [a.name for a in actions]
728
788
729 ui.write(_('beginning upgrade...\n'))
789 ui.write(_('beginning upgrade...\n'))
730 with repo.wlock():
790 with repo.wlock():
731 with repo.lock():
791 with repo.lock():
732 ui.write(_('repository locked and read-only\n'))
792 ui.write(_('repository locked and read-only\n'))
733 # Our strategy for upgrading the repository is to create a new,
793 # Our strategy for upgrading the repository is to create a new,
734 # temporary repository, write data to it, then do a swap of the
794 # temporary repository, write data to it, then do a swap of the
735 # data. There are less heavyweight ways to do this, but it is easier
795 # data. There are less heavyweight ways to do this, but it is easier
736 # to create a new repo object than to instantiate all the components
796 # to create a new repo object than to instantiate all the components
737 # (like the store) separately.
797 # (like the store) separately.
738 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
798 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
739 backuppath = None
799 backuppath = None
740 try:
800 try:
741 ui.write(_('creating temporary repository to stage migrated '
801 ui.write(_('creating temporary repository to stage migrated '
742 'data: %s\n') % tmppath)
802 'data: %s\n') % tmppath)
743 dstrepo = localrepo.localrepository(repo.baseui,
803 dstrepo = localrepo.localrepository(repo.baseui,
744 path=tmppath,
804 path=tmppath,
745 create=True)
805 create=True)
746
806
747 with dstrepo.wlock():
807 with dstrepo.wlock():
748 with dstrepo.lock():
808 with dstrepo.lock():
749 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
809 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
750 upgradeactions)
810 upgradeactions)
751
811
752 finally:
812 finally:
753 ui.write(_('removing temporary repository %s\n') % tmppath)
813 ui.write(_('removing temporary repository %s\n') % tmppath)
754 repo.vfs.rmtree(tmppath, forcibly=True)
814 repo.vfs.rmtree(tmppath, forcibly=True)
755
815
756 if backuppath:
816 if backuppath:
757 ui.warn(_('copy of old repository backed up at %s\n') %
817 ui.warn(_('copy of old repository backed up at %s\n') %
758 backuppath)
818 backuppath)
759 ui.warn(_('the old repository will not be deleted; remove '
819 ui.warn(_('the old repository will not be deleted; remove '
760 'it to free up disk space once the upgraded '
820 'it to free up disk space once the upgraded '
761 'repository is verified\n'))
821 'repository is verified\n'))
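For context (not part of this diff): upgraderepo() is not invoked directly by users. At the time of this changeset it is reached through the debugupgraderepo command, whose --run and --optimize flags map onto the run and optimize parameters above. Roughly, given a ui and repo object:

    from mercurial import upgrade
    upgrade.upgraderepo(ui, repo)                    # dry run: print requirements and actions
    upgrade.upgraderepo(ui, repo, run=True,
                        optimize=['redeltaparent'])  # actually perform the upgrade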