##// END OF EJS Templates
upgrade: introduce a 'formatvariant' class...
Pierre-Yves David -
r32030:e4722357 default
parent child Browse files
Show More
@@ -1,755 +1,761
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11 import tempfile
11 import tempfile
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 localrepo,
17 localrepo,
18 manifest,
18 manifest,
19 revlog,
19 revlog,
20 scmutil,
20 scmutil,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
25 def requiredsourcerequirements(repo):
25 def requiredsourcerequirements(repo):
26 """Obtain requirements required to be present to upgrade a repo.
26 """Obtain requirements required to be present to upgrade a repo.
27
27
28 An upgrade will not be allowed if the repository doesn't have the
28 An upgrade will not be allowed if the repository doesn't have the
29 requirements returned by this function.
29 requirements returned by this function.
30 """
30 """
31 return set([
31 return set([
32 # Introduced in Mercurial 0.9.2.
32 # Introduced in Mercurial 0.9.2.
33 'revlogv1',
33 'revlogv1',
34 # Introduced in Mercurial 0.9.2.
34 # Introduced in Mercurial 0.9.2.
35 'store',
35 'store',
36 ])
36 ])
37
37
38 def blocksourcerequirements(repo):
38 def blocksourcerequirements(repo):
39 """Obtain requirements that will prevent an upgrade from occurring.
39 """Obtain requirements that will prevent an upgrade from occurring.
40
40
41 An upgrade cannot be performed if the source repository contains a
41 An upgrade cannot be performed if the source repository contains a
42 requirements in the returned set.
42 requirements in the returned set.
43 """
43 """
44 return set([
44 return set([
45 # The upgrade code does not yet support these experimental features.
45 # The upgrade code does not yet support these experimental features.
46 # This is an artificial limitation.
46 # This is an artificial limitation.
47 'manifestv2',
47 'manifestv2',
48 'treemanifest',
48 'treemanifest',
49 # This was a precursor to generaldelta and was never enabled by default.
49 # This was a precursor to generaldelta and was never enabled by default.
50 # It should (hopefully) not exist in the wild.
50 # It should (hopefully) not exist in the wild.
51 'parentdelta',
51 'parentdelta',
52 # Upgrade should operate on the actual store, not the shared link.
52 # Upgrade should operate on the actual store, not the shared link.
53 'shared',
53 'shared',
54 ])
54 ])
55
55
56 def supportremovedrequirements(repo):
56 def supportremovedrequirements(repo):
57 """Obtain requirements that can be removed during an upgrade.
57 """Obtain requirements that can be removed during an upgrade.
58
58
59 If an upgrade were to create a repository that dropped a requirement,
59 If an upgrade were to create a repository that dropped a requirement,
60 the dropped requirement must appear in the returned set for the upgrade
60 the dropped requirement must appear in the returned set for the upgrade
61 to be allowed.
61 to be allowed.
62 """
62 """
63 return set()
63 return set()
64
64
65 def supporteddestrequirements(repo):
65 def supporteddestrequirements(repo):
66 """Obtain requirements that upgrade supports in the destination.
66 """Obtain requirements that upgrade supports in the destination.
67
67
68 If the result of the upgrade would create requirements not in this set,
68 If the result of the upgrade would create requirements not in this set,
69 the upgrade is disallowed.
69 the upgrade is disallowed.
70
70
71 Extensions should monkeypatch this to add their custom requirements.
71 Extensions should monkeypatch this to add their custom requirements.
72 """
72 """
73 return set([
73 return set([
74 'dotencode',
74 'dotencode',
75 'fncache',
75 'fncache',
76 'generaldelta',
76 'generaldelta',
77 'revlogv1',
77 'revlogv1',
78 'store',
78 'store',
79 ])
79 ])
80
80
81 def allowednewrequirements(repo):
81 def allowednewrequirements(repo):
82 """Obtain requirements that can be added to a repository during upgrade.
82 """Obtain requirements that can be added to a repository during upgrade.
83
83
84 This is used to disallow proposed requirements from being added when
84 This is used to disallow proposed requirements from being added when
85 they weren't present before.
85 they weren't present before.
86
86
87 We use a list of allowed requirement additions instead of a list of known
87 We use a list of allowed requirement additions instead of a list of known
88 bad additions because the whitelist approach is safer and will prevent
88 bad additions because the whitelist approach is safer and will prevent
89 future, unknown requirements from accidentally being added.
89 future, unknown requirements from accidentally being added.
90 """
90 """
91 return set([
91 return set([
92 'dotencode',
92 'dotencode',
93 'fncache',
93 'fncache',
94 'generaldelta',
94 'generaldelta',
95 ])
95 ])
96
96
97 deficiency = 'deficiency'
97 deficiency = 'deficiency'
98 optimisation = 'optimization'
98 optimisation = 'optimization'
99
99
100 class improvement(object):
100 class improvement(object):
101 """Represents an improvement that can be made as part of an upgrade.
101 """Represents an improvement that can be made as part of an upgrade.
102
102
103 The following attributes are defined on each instance:
103 The following attributes are defined on each instance:
104
104
105 name
105 name
106 Machine-readable string uniquely identifying this improvement. It
106 Machine-readable string uniquely identifying this improvement. It
107 will be mapped to an action later in the upgrade process.
107 will be mapped to an action later in the upgrade process.
108
108
109 type
109 type
110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
110 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
111 problem. An optimization is an action (sometimes optional) that
111 problem. An optimization is an action (sometimes optional) that
112 can be taken to further improve the state of the repository.
112 can be taken to further improve the state of the repository.
113
113
114 description
114 description
115 Message intended for humans explaining the improvement in more detail,
115 Message intended for humans explaining the improvement in more detail,
116 including the implications of it. For ``deficiency`` types, should be
116 including the implications of it. For ``deficiency`` types, should be
117 worded in the present tense. For ``optimisation`` types, should be
117 worded in the present tense. For ``optimisation`` types, should be
118 worded in the future tense.
118 worded in the future tense.
119
119
120 upgrademessage
120 upgrademessage
121 Message intended for humans explaining what an upgrade addressing this
121 Message intended for humans explaining what an upgrade addressing this
122 issue will do. Should be worded in the future tense.
122 issue will do. Should be worded in the future tense.
123
124 fromdefault (``deficiency`` types only)
125 Boolean indicating whether the current (deficient) state deviates
126 from Mercurial's default configuration.
127
128 fromconfig (``deficiency`` types only)
129 Boolean indicating whether the current (deficient) state deviates
130 from the current Mercurial configuration.
131 """
123 """
132 def __init__(self, name, type, description, upgrademessage, **kwargs):
124 def __init__(self, name, type, description, upgrademessage):
133 self.name = name
125 self.name = name
134 self.type = type
126 self.type = type
135 self.description = description
127 self.description = description
136 self.upgrademessage = upgrademessage
128 self.upgrademessage = upgrademessage
137
129
138 for k, v in kwargs.items():
139 setattr(self, k, v)
140
141 def __eq__(self, other):
130 def __eq__(self, other):
142 if not isinstance(other, improvement):
131 if not isinstance(other, improvement):
143 # This is what python tell use to do
132 # This is what python tell use to do
144 return NotImplemented
133 return NotImplemented
145 return self.name == other.name
134 return self.name == other.name
146
135
147 def __ne__(self, other):
136 def __ne__(self, other):
148 return not self == other
137 return not self == other
149
138
150 def __hash__(self):
139 def __hash__(self):
151 return hash(self.name)
140 return hash(self.name)
152
141
142 class formatvariant(improvement):
143 """an improvement subclass dedicated to repository format
144
145 extra attributes:
146
147 fromdefault (``deficiency`` types only)
148 Boolean indicating whether the current (deficient) state deviates
149 from Mercurial's default configuration.
150
151 fromconfig (``deficiency`` types only)
152 Boolean indicating whether the current (deficient) state deviates
153 from the current Mercurial configuration.
154 """
155
156 def __init__(self, name, description, upgrademessage, fromdefault,
157 fromconfig):
158 super(formatvariant, self).__init__(name, deficiency, description,
159 upgrademessage)
160 self.fromdefault = fromdefault
161 self.fromconfig = fromconfig
162
153 def finddeficiencies(repo):
163 def finddeficiencies(repo):
154 """returns a list of deficiencies that the repo suffer from"""
164 """returns a list of deficiencies that the repo suffer from"""
155 newreporeqs = localrepo.newreporequirements(repo)
165 newreporeqs = localrepo.newreporequirements(repo)
156
166
157 deficiencies = []
167 deficiencies = []
158
168
159 # We could detect lack of revlogv1 and store here, but they were added
169 # We could detect lack of revlogv1 and store here, but they were added
160 # in 0.9.2 and we don't support upgrading repos without these
170 # in 0.9.2 and we don't support upgrading repos without these
161 # requirements, so let's not bother.
171 # requirements, so let's not bother.
162
172
163 if 'fncache' not in repo.requirements:
173 if 'fncache' not in repo.requirements:
164 deficiencies.append(improvement(
174 deficiencies.append(formatvariant(
165 name='fncache',
175 name='fncache',
166 type=deficiency,
167 description=_('long and reserved filenames may not work correctly; '
176 description=_('long and reserved filenames may not work correctly; '
168 'repository performance is sub-optimal'),
177 'repository performance is sub-optimal'),
169 upgrademessage=_('repository will be more resilient to storing '
178 upgrademessage=_('repository will be more resilient to storing '
170 'certain paths and performance of certain '
179 'certain paths and performance of certain '
171 'operations should be improved'),
180 'operations should be improved'),
172 fromdefault=True,
181 fromdefault=True,
173 fromconfig='fncache' in newreporeqs))
182 fromconfig='fncache' in newreporeqs))
174
183
175 if 'dotencode' not in repo.requirements:
184 if 'dotencode' not in repo.requirements:
176 deficiencies.append(improvement(
185 deficiencies.append(formatvariant(
177 name='dotencode',
186 name='dotencode',
178 type=deficiency,
179 description=_('storage of filenames beginning with a period or '
187 description=_('storage of filenames beginning with a period or '
180 'space may not work correctly'),
188 'space may not work correctly'),
181 upgrademessage=_('repository will be better able to store files '
189 upgrademessage=_('repository will be better able to store files '
182 'beginning with a space or period'),
190 'beginning with a space or period'),
183 fromdefault=True,
191 fromdefault=True,
184 fromconfig='dotencode' in newreporeqs))
192 fromconfig='dotencode' in newreporeqs))
185
193
186 if 'generaldelta' not in repo.requirements:
194 if 'generaldelta' not in repo.requirements:
187 deficiencies.append(improvement(
195 deficiencies.append(formatvariant(
188 name='generaldelta',
196 name='generaldelta',
189 type=deficiency,
190 description=_('deltas within internal storage are unable to '
197 description=_('deltas within internal storage are unable to '
191 'choose optimal revisions; repository is larger and '
198 'choose optimal revisions; repository is larger and '
192 'slower than it could be; interaction with other '
199 'slower than it could be; interaction with other '
193 'repositories may require extra network and CPU '
200 'repositories may require extra network and CPU '
194 'resources, making "hg push" and "hg pull" slower'),
201 'resources, making "hg push" and "hg pull" slower'),
195 upgrademessage=_('repository storage will be able to create '
202 upgrademessage=_('repository storage will be able to create '
196 'optimal deltas; new repository data will be '
203 'optimal deltas; new repository data will be '
197 'smaller and read times should decrease; '
204 'smaller and read times should decrease; '
198 'interacting with other repositories using this '
205 'interacting with other repositories using this '
199 'storage model should require less network and '
206 'storage model should require less network and '
200 'CPU resources, making "hg push" and "hg pull" '
207 'CPU resources, making "hg push" and "hg pull" '
201 'faster'),
208 'faster'),
202 fromdefault=True,
209 fromdefault=True,
203 fromconfig='generaldelta' in newreporeqs))
210 fromconfig='generaldelta' in newreporeqs))
204
211
205 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
212 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
206 # changelogs with deltas.
213 # changelogs with deltas.
207 cl = repo.changelog
214 cl = repo.changelog
208 for rev in cl:
215 for rev in cl:
209 chainbase = cl.chainbase(rev)
216 chainbase = cl.chainbase(rev)
210 if chainbase != rev:
217 if chainbase != rev:
211 deficiencies.append(improvement(
218 deficiencies.append(formatvariant(
212 name='removecldeltachain',
219 name='removecldeltachain',
213 type=deficiency,
214 description=_('changelog storage is using deltas instead of '
220 description=_('changelog storage is using deltas instead of '
215 'raw entries; changelog reading and any '
221 'raw entries; changelog reading and any '
216 'operation relying on changelog data are slower '
222 'operation relying on changelog data are slower '
217 'than they could be'),
223 'than they could be'),
218 upgrademessage=_('changelog storage will be reformated to '
224 upgrademessage=_('changelog storage will be reformated to '
219 'store raw entries; changelog reading will be '
225 'store raw entries; changelog reading will be '
220 'faster; changelog size may be reduced'),
226 'faster; changelog size may be reduced'),
221 fromdefault=True,
227 fromdefault=True,
222 fromconfig=True))
228 fromconfig=True))
223 break
229 break
224
230
225 return deficiencies
231 return deficiencies
226
232
227 def findoptimizations(repo):
233 def findoptimizations(repo):
228 """Determine optimisation that could be used during upgrade"""
234 """Determine optimisation that could be used during upgrade"""
229 # These are unconditionally added. There is logic later that figures out
235 # These are unconditionally added. There is logic later that figures out
230 # which ones to apply.
236 # which ones to apply.
231 optimizations = []
237 optimizations = []
232
238
233 optimizations.append(improvement(
239 optimizations.append(improvement(
234 name='redeltaparent',
240 name='redeltaparent',
235 type=optimisation,
241 type=optimisation,
236 description=_('deltas within internal storage will be recalculated to '
242 description=_('deltas within internal storage will be recalculated to '
237 'choose an optimal base revision where this was not '
243 'choose an optimal base revision where this was not '
238 'already done; the size of the repository may shrink and '
244 'already done; the size of the repository may shrink and '
239 'various operations may become faster; the first time '
245 'various operations may become faster; the first time '
240 'this optimization is performed could slow down upgrade '
246 'this optimization is performed could slow down upgrade '
241 'execution considerably; subsequent invocations should '
247 'execution considerably; subsequent invocations should '
242 'not run noticeably slower'),
248 'not run noticeably slower'),
243 upgrademessage=_('deltas within internal storage will choose a new '
249 upgrademessage=_('deltas within internal storage will choose a new '
244 'base revision if needed')))
250 'base revision if needed')))
245
251
246 optimizations.append(improvement(
252 optimizations.append(improvement(
247 name='redeltamultibase',
253 name='redeltamultibase',
248 type=optimisation,
254 type=optimisation,
249 description=_('deltas within internal storage will be recalculated '
255 description=_('deltas within internal storage will be recalculated '
250 'against multiple base revision and the smallest '
256 'against multiple base revision and the smallest '
251 'difference will be used; the size of the repository may '
257 'difference will be used; the size of the repository may '
252 'shrink significantly when there are many merges; this '
258 'shrink significantly when there are many merges; this '
253 'optimization will slow down execution in proportion to '
259 'optimization will slow down execution in proportion to '
254 'the number of merges in the repository and the amount '
260 'the number of merges in the repository and the amount '
255 'of files in the repository; this slow down should not '
261 'of files in the repository; this slow down should not '
256 'be significant unless there are tens of thousands of '
262 'be significant unless there are tens of thousands of '
257 'files and thousands of merges'),
263 'files and thousands of merges'),
258 upgrademessage=_('deltas within internal storage will choose an '
264 upgrademessage=_('deltas within internal storage will choose an '
259 'optimal delta by computing deltas against multiple '
265 'optimal delta by computing deltas against multiple '
260 'parents; may slow down execution time '
266 'parents; may slow down execution time '
261 'significantly')))
267 'significantly')))
262
268
263 optimizations.append(improvement(
269 optimizations.append(improvement(
264 name='redeltaall',
270 name='redeltaall',
265 type=optimisation,
271 type=optimisation,
266 description=_('deltas within internal storage will always be '
272 description=_('deltas within internal storage will always be '
267 'recalculated without reusing prior deltas; this will '
273 'recalculated without reusing prior deltas; this will '
268 'likely make execution run several times slower; this '
274 'likely make execution run several times slower; this '
269 'optimization is typically not needed'),
275 'optimization is typically not needed'),
270 upgrademessage=_('deltas within internal storage will be fully '
276 upgrademessage=_('deltas within internal storage will be fully '
271 'recomputed; this will likely drastically slow down '
277 'recomputed; this will likely drastically slow down '
272 'execution time')))
278 'execution time')))
273
279
274 return optimizations
280 return optimizations
275
281
276 def determineactions(repo, deficiencies, sourcereqs, destreqs):
282 def determineactions(repo, deficiencies, sourcereqs, destreqs):
277 """Determine upgrade actions that will be performed.
283 """Determine upgrade actions that will be performed.
278
284
279 Given a list of improvements as returned by ``finddeficiencies`` and
285 Given a list of improvements as returned by ``finddeficiencies`` and
280 ``findoptimizations``, determine the list of upgrade actions that
286 ``findoptimizations``, determine the list of upgrade actions that
281 will be performed.
287 will be performed.
282
288
283 The role of this function is to filter improvements if needed, apply
289 The role of this function is to filter improvements if needed, apply
284 recommended optimizations from the improvements list that make sense,
290 recommended optimizations from the improvements list that make sense,
285 etc.
291 etc.
286
292
287 Returns a list of action names.
293 Returns a list of action names.
288 """
294 """
289 newactions = []
295 newactions = []
290
296
291 knownreqs = supporteddestrequirements(repo)
297 knownreqs = supporteddestrequirements(repo)
292
298
293 for d in deficiencies:
299 for d in deficiencies:
294 name = d.name
300 name = d.name
295
301
296 # If the action is a requirement that doesn't show up in the
302 # If the action is a requirement that doesn't show up in the
297 # destination requirements, prune the action.
303 # destination requirements, prune the action.
298 if name in knownreqs and name not in destreqs:
304 if name in knownreqs and name not in destreqs:
299 continue
305 continue
300
306
301 newactions.append(d)
307 newactions.append(d)
302
308
303 # FUTURE consider adding some optimizations here for certain transitions.
309 # FUTURE consider adding some optimizations here for certain transitions.
304 # e.g. adding generaldelta could schedule parent redeltas.
310 # e.g. adding generaldelta could schedule parent redeltas.
305
311
306 return newactions
312 return newactions
307
313
308 def _revlogfrompath(repo, path):
314 def _revlogfrompath(repo, path):
309 """Obtain a revlog from a repo path.
315 """Obtain a revlog from a repo path.
310
316
311 An instance of the appropriate class is returned.
317 An instance of the appropriate class is returned.
312 """
318 """
313 if path == '00changelog.i':
319 if path == '00changelog.i':
314 return changelog.changelog(repo.svfs)
320 return changelog.changelog(repo.svfs)
315 elif path.endswith('00manifest.i'):
321 elif path.endswith('00manifest.i'):
316 mandir = path[:-len('00manifest.i')]
322 mandir = path[:-len('00manifest.i')]
317 return manifest.manifestrevlog(repo.svfs, dir=mandir)
323 return manifest.manifestrevlog(repo.svfs, dir=mandir)
318 else:
324 else:
319 # Filelogs don't do anything special with settings. So we can use a
325 # Filelogs don't do anything special with settings. So we can use a
320 # vanilla revlog.
326 # vanilla revlog.
321 return revlog.revlog(repo.svfs, path)
327 return revlog.revlog(repo.svfs, path)
322
328
323 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
329 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
324 """Copy revlogs between 2 repos."""
330 """Copy revlogs between 2 repos."""
325 revcount = 0
331 revcount = 0
326 srcsize = 0
332 srcsize = 0
327 srcrawsize = 0
333 srcrawsize = 0
328 dstsize = 0
334 dstsize = 0
329 fcount = 0
335 fcount = 0
330 frevcount = 0
336 frevcount = 0
331 fsrcsize = 0
337 fsrcsize = 0
332 frawsize = 0
338 frawsize = 0
333 fdstsize = 0
339 fdstsize = 0
334 mcount = 0
340 mcount = 0
335 mrevcount = 0
341 mrevcount = 0
336 msrcsize = 0
342 msrcsize = 0
337 mrawsize = 0
343 mrawsize = 0
338 mdstsize = 0
344 mdstsize = 0
339 crevcount = 0
345 crevcount = 0
340 csrcsize = 0
346 csrcsize = 0
341 crawsize = 0
347 crawsize = 0
342 cdstsize = 0
348 cdstsize = 0
343
349
344 # Perform a pass to collect metadata. This validates we can open all
350 # Perform a pass to collect metadata. This validates we can open all
345 # source files and allows a unified progress bar to be displayed.
351 # source files and allows a unified progress bar to be displayed.
346 for unencoded, encoded, size in srcrepo.store.walk():
352 for unencoded, encoded, size in srcrepo.store.walk():
347 if unencoded.endswith('.d'):
353 if unencoded.endswith('.d'):
348 continue
354 continue
349
355
350 rl = _revlogfrompath(srcrepo, unencoded)
356 rl = _revlogfrompath(srcrepo, unencoded)
351 revcount += len(rl)
357 revcount += len(rl)
352
358
353 datasize = 0
359 datasize = 0
354 rawsize = 0
360 rawsize = 0
355 idx = rl.index
361 idx = rl.index
356 for rev in rl:
362 for rev in rl:
357 e = idx[rev]
363 e = idx[rev]
358 datasize += e[1]
364 datasize += e[1]
359 rawsize += e[2]
365 rawsize += e[2]
360
366
361 srcsize += datasize
367 srcsize += datasize
362 srcrawsize += rawsize
368 srcrawsize += rawsize
363
369
364 # This is for the separate progress bars.
370 # This is for the separate progress bars.
365 if isinstance(rl, changelog.changelog):
371 if isinstance(rl, changelog.changelog):
366 crevcount += len(rl)
372 crevcount += len(rl)
367 csrcsize += datasize
373 csrcsize += datasize
368 crawsize += rawsize
374 crawsize += rawsize
369 elif isinstance(rl, manifest.manifestrevlog):
375 elif isinstance(rl, manifest.manifestrevlog):
370 mcount += 1
376 mcount += 1
371 mrevcount += len(rl)
377 mrevcount += len(rl)
372 msrcsize += datasize
378 msrcsize += datasize
373 mrawsize += rawsize
379 mrawsize += rawsize
374 elif isinstance(rl, revlog.revlog):
380 elif isinstance(rl, revlog.revlog):
375 fcount += 1
381 fcount += 1
376 frevcount += len(rl)
382 frevcount += len(rl)
377 fsrcsize += datasize
383 fsrcsize += datasize
378 frawsize += rawsize
384 frawsize += rawsize
379
385
380 if not revcount:
386 if not revcount:
381 return
387 return
382
388
383 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
389 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
384 '%d in changelog)\n') %
390 '%d in changelog)\n') %
385 (revcount, frevcount, mrevcount, crevcount))
391 (revcount, frevcount, mrevcount, crevcount))
386 ui.write(_('migrating %s in store; %s tracked data\n') % (
392 ui.write(_('migrating %s in store; %s tracked data\n') % (
387 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
393 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
388
394
389 # Used to keep track of progress.
395 # Used to keep track of progress.
390 progress = []
396 progress = []
391 def oncopiedrevision(rl, rev, node):
397 def oncopiedrevision(rl, rev, node):
392 progress[1] += 1
398 progress[1] += 1
393 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
399 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
394
400
395 # Do the actual copying.
401 # Do the actual copying.
396 # FUTURE this operation can be farmed off to worker processes.
402 # FUTURE this operation can be farmed off to worker processes.
397 seen = set()
403 seen = set()
398 for unencoded, encoded, size in srcrepo.store.walk():
404 for unencoded, encoded, size in srcrepo.store.walk():
399 if unencoded.endswith('.d'):
405 if unencoded.endswith('.d'):
400 continue
406 continue
401
407
402 oldrl = _revlogfrompath(srcrepo, unencoded)
408 oldrl = _revlogfrompath(srcrepo, unencoded)
403 newrl = _revlogfrompath(dstrepo, unencoded)
409 newrl = _revlogfrompath(dstrepo, unencoded)
404
410
405 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
411 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
406 ui.write(_('finished migrating %d manifest revisions across %d '
412 ui.write(_('finished migrating %d manifest revisions across %d '
407 'manifests; change in size: %s\n') %
413 'manifests; change in size: %s\n') %
408 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
414 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
409
415
410 ui.write(_('migrating changelog containing %d revisions '
416 ui.write(_('migrating changelog containing %d revisions '
411 '(%s in store; %s tracked data)\n') %
417 '(%s in store; %s tracked data)\n') %
412 (crevcount, util.bytecount(csrcsize),
418 (crevcount, util.bytecount(csrcsize),
413 util.bytecount(crawsize)))
419 util.bytecount(crawsize)))
414 seen.add('c')
420 seen.add('c')
415 progress[:] = [_('changelog revisions'), 0, crevcount]
421 progress[:] = [_('changelog revisions'), 0, crevcount]
416 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
422 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
417 ui.write(_('finished migrating %d filelog revisions across %d '
423 ui.write(_('finished migrating %d filelog revisions across %d '
418 'filelogs; change in size: %s\n') %
424 'filelogs; change in size: %s\n') %
419 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
425 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
420
426
421 ui.write(_('migrating %d manifests containing %d revisions '
427 ui.write(_('migrating %d manifests containing %d revisions '
422 '(%s in store; %s tracked data)\n') %
428 '(%s in store; %s tracked data)\n') %
423 (mcount, mrevcount, util.bytecount(msrcsize),
429 (mcount, mrevcount, util.bytecount(msrcsize),
424 util.bytecount(mrawsize)))
430 util.bytecount(mrawsize)))
425 seen.add('m')
431 seen.add('m')
426 progress[:] = [_('manifest revisions'), 0, mrevcount]
432 progress[:] = [_('manifest revisions'), 0, mrevcount]
427 elif 'f' not in seen:
433 elif 'f' not in seen:
428 ui.write(_('migrating %d filelogs containing %d revisions '
434 ui.write(_('migrating %d filelogs containing %d revisions '
429 '(%s in store; %s tracked data)\n') %
435 '(%s in store; %s tracked data)\n') %
430 (fcount, frevcount, util.bytecount(fsrcsize),
436 (fcount, frevcount, util.bytecount(fsrcsize),
431 util.bytecount(frawsize)))
437 util.bytecount(frawsize)))
432 seen.add('f')
438 seen.add('f')
433 progress[:] = [_('file revisions'), 0, frevcount]
439 progress[:] = [_('file revisions'), 0, frevcount]
434
440
435 ui.progress(progress[0], progress[1], total=progress[2])
441 ui.progress(progress[0], progress[1], total=progress[2])
436
442
437 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
443 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
438 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
444 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
439 deltareuse=deltareuse,
445 deltareuse=deltareuse,
440 aggressivemergedeltas=aggressivemergedeltas)
446 aggressivemergedeltas=aggressivemergedeltas)
441
447
442 datasize = 0
448 datasize = 0
443 idx = newrl.index
449 idx = newrl.index
444 for rev in newrl:
450 for rev in newrl:
445 datasize += idx[rev][1]
451 datasize += idx[rev][1]
446
452
447 dstsize += datasize
453 dstsize += datasize
448
454
449 if isinstance(newrl, changelog.changelog):
455 if isinstance(newrl, changelog.changelog):
450 cdstsize += datasize
456 cdstsize += datasize
451 elif isinstance(newrl, manifest.manifestrevlog):
457 elif isinstance(newrl, manifest.manifestrevlog):
452 mdstsize += datasize
458 mdstsize += datasize
453 else:
459 else:
454 fdstsize += datasize
460 fdstsize += datasize
455
461
456 ui.progress(progress[0], None)
462 ui.progress(progress[0], None)
457
463
458 ui.write(_('finished migrating %d changelog revisions; change in size: '
464 ui.write(_('finished migrating %d changelog revisions; change in size: '
459 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
465 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
460
466
461 ui.write(_('finished migrating %d total revisions; total change in store '
467 ui.write(_('finished migrating %d total revisions; total change in store '
462 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
468 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
463
469
464 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
470 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
465 """Determine whether to copy a store file during upgrade.
471 """Determine whether to copy a store file during upgrade.
466
472
467 This function is called when migrating store files from ``srcrepo`` to
473 This function is called when migrating store files from ``srcrepo`` to
468 ``dstrepo`` as part of upgrading a repository.
474 ``dstrepo`` as part of upgrading a repository.
469
475
470 Args:
476 Args:
471 srcrepo: repo we are copying from
477 srcrepo: repo we are copying from
472 dstrepo: repo we are copying to
478 dstrepo: repo we are copying to
473 requirements: set of requirements for ``dstrepo``
479 requirements: set of requirements for ``dstrepo``
474 path: store file being examined
480 path: store file being examined
475 mode: the ``ST_MODE`` file type of ``path``
481 mode: the ``ST_MODE`` file type of ``path``
476 st: ``stat`` data structure for ``path``
482 st: ``stat`` data structure for ``path``
477
483
478 Function should return ``True`` if the file is to be copied.
484 Function should return ``True`` if the file is to be copied.
479 """
485 """
480 # Skip revlogs.
486 # Skip revlogs.
481 if path.endswith(('.i', '.d')):
487 if path.endswith(('.i', '.d')):
482 return False
488 return False
483 # Skip transaction related files.
489 # Skip transaction related files.
484 if path.startswith('undo'):
490 if path.startswith('undo'):
485 return False
491 return False
486 # Only copy regular files.
492 # Only copy regular files.
487 if mode != stat.S_IFREG:
493 if mode != stat.S_IFREG:
488 return False
494 return False
489 # Skip other skipped files.
495 # Skip other skipped files.
490 if path in ('lock', 'fncache'):
496 if path in ('lock', 'fncache'):
491 return False
497 return False
492
498
493 return True
499 return True
494
500
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The arguments mirror those of the caller: ``ui`` for output, the source
    and destination repositories, and the requirement set for the upgraded
    repository. The default implementation intentionally does nothing;
    extensions wrap or replace this function to migrate extra data.
    """
501
507
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Args:
        ui: ui instance used for status output
        srcrepo: repository being upgraded; its wlock must be held
        dstrepo: temporary repository receiving migrated data; its wlock
                 must be held
        requirements: requirement set to write for the upgraded repository
        actions: names of the upgrade actions/optimizations to perform

    Returns the path of the backup directory that holds the pre-upgrade
    'requires' file and store.
    """
    # Both repositories must already be write-locked by this process;
    # the destructive swap below is only safe under those locks.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Map the requested re-delta action to a revlog delta reuse policy.
    # Order matters: 'redeltaall' (recompute every delta) wins over the
    # others. 'redeltamultibase' maps to the same policy as 'redeltaparent';
    # its additional recomputation is requested via the boolean flag passed
    # to _copyrevlogs() below.
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

    # Let extensions migrate additional data before the store swap.
    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    # Everything replaced below is preserved under this directory so the
    # user can restore it (or delete it once the upgrade is verified).
    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
590
596
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    Args:
        ui: ui instance used for all output
        repo: repository to upgrade (operated on unfiltered)
        run: when False (the default) only report what an upgrade would do;
             when True, actually perform the upgrade
        optimize: iterable of optimization names to apply in addition to the
                  automatically detected deficiencies

    Raises error.Abort when the source repository cannot be upgraded, when
    the requirement transformation is unsupported, or when an unknown
    optimization name is requested.
    """
    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    # Validate the requirement delta in both directions: nothing may be
    # dropped unless removal is supported, and nothing may be added unless
    # addition is allowed and the destination supports it.
    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize: # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        # Summarize the requirement delta (preserved / removed / added).
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # One paragraph per planned action: name, then its upgrade message.
        for a in actions:
            ui.write('%s\n   %s\n\n' % (a.name, a.upgrademessage))

    # Dry-run (report-only) mode: describe deficiencies and what an actual
    # run would change, then return without touching the repository.
    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig:
                fromconfig.append(d)
            elif d.fromdefault:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  upgradeactions)

            finally:
                # The staging repository is always discarded, whether the
                # migration succeeded or was interrupted.
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

    if backuppath:
        ui.warn(_('copy of old repository backed up at %s\n') %
                backuppath)
        ui.warn(_('the old repository will not be deleted; remove '
                  'it to free up disk space once the upgraded '
                  'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now