##// END OF EJS Templates
upgrade: drop the prefix to the '_finishdatamigration' function...
Pierre-Yves David -
r31874:27ec6517 default
parent child Browse files
Show More
@@ -1,758 +1,758
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import stat
11 import stat
12 import tempfile
12 import tempfile
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 changelog,
16 changelog,
17 error,
17 error,
18 manifest,
18 manifest,
19 revlog,
19 revlog,
20 scmutil,
20 scmutil,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
def requiredsourcerequirements(repo):
    """Obtain requirements that must be present to upgrade a repo.

    If the repository lacks any requirement in the returned set, the
    upgrade is refused.
    """
    # Both requirements were introduced in Mercurial 0.9.2; repositories
    # predating them cannot be upgraded.
    return {'revlogv1', 'store'}
37
37
def blocksourcerequirements(repo):
    """Obtain requirements that block an upgrade from occurring.

    If the source repository declares any requirement in the returned set,
    the upgrade cannot be performed.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }
55
55
def supportremovedrequirements(repo):
    """Obtain requirements that may be dropped during an upgrade.

    A requirement present in the source but absent from the upgraded
    repository is only allowed if it appears in the returned set.
    Currently no requirement removals are supported.
    """
    return set()
64
64
def supporteddestrequirements(repo):
    """Obtain the requirements that upgrade supports in the destination.

    The upgrade is disallowed if it would produce a repository whose
    requirements are not a subset of this set.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    }
80
80
def allowednewrequirements(repo):
    """Obtain requirements that may be added to a repository during upgrade.

    Used to reject proposed requirements that were not present before the
    upgrade. A whitelist of permitted additions is used (rather than a
    blacklist of known-bad ones) so future, unknown requirements cannot be
    added by accident.
    """
    return {
        'dotencode',
        'fncache',
        'generaldelta',
    }
96
96
# Improvement categories: a "deficiency" is a problem with the repository's
# current state; an "optimization" is an optional extra action. Note the
# British/American spelling mismatch between name and value is intentional
# (the value is what gets displayed/compared).
deficiency = 'deficiency'
optimisation = 'optimization'
99
99
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem; an optimization is a (sometimes optional) action that can
       further improve the state of the repository.

    description
       Human-readable message explaining the improvement in detail,
       including its implications. Worded in the present tense for
       ``deficiency`` types and in the future tense for ``optimisation``
       types.

    upgrademessage
       Human-readable message explaining what an upgrade addressing this
       issue will do. Worded in the future tense.

    fromdefault (``deficiency`` types only)
       Whether the current (deficient) state deviates from Mercurial's
       default configuration.

    fromconfig (``deficiency`` types only)
       Whether the current (deficient) state deviates from the current
       Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        # Any extra keyword arguments (e.g. ``fromdefault``/``fromconfig``
        # for deficiencies) become attributes verbatim.
        for extra in kwargs:
            setattr(self, extra, kwargs[extra])
140
140
def findimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``improvement`` instances describing repository
    deficiencies and available optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)
    reqs = repo.requirements
    found = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in reqs:
        found.append(improvement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in reqs:
        found.append(improvement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in reqs:
        found.append(improvement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    if any(cl.chainbase(rev) != rev for rev in cl):
        found.append(improvement(
            name='removecldeltachain',
            type=deficiency,
            description=_('changelog storage is using deltas instead of '
                          'raw entries; changelog reading and any '
                          'operation relying on changelog data are slower '
                          'than they could be'),
            upgrademessage=_('changelog storage will be reformated to '
                             'store raw entries; changelog reading will be '
                             'faster; changelog size may be reduced'),
            fromdefault=True,
            fromconfig=True))

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    found.extend([
        improvement(
            name='redeltaparent',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated to '
                          'choose an optimal base revision where this was not '
                          'already done; the size of the repository may shrink and '
                          'various operations may become faster; the first time '
                          'this optimization is performed could slow down upgrade '
                          'execution considerably; subsequent invocations should '
                          'not run noticeably slower'),
            upgrademessage=_('deltas within internal storage will choose a new '
                             'base revision if needed')),
        improvement(
            name='redeltamultibase',
            type=optimisation,
            description=_('deltas within internal storage will be recalculated '
                          'against multiple base revision and the smallest '
                          'difference will be used; the size of the repository may '
                          'shrink significantly when there are many merges; this '
                          'optimization will slow down execution in proportion to '
                          'the number of merges in the repository and the amount '
                          'of files in the repository; this slow down should not '
                          'be significant unless there are tens of thousands of '
                          'files and thousands of merges'),
            upgrademessage=_('deltas within internal storage will choose an '
                             'optimal delta by computing deltas against multiple '
                             'parents; may slow down execution time '
                             'significantly')),
        improvement(
            name='redeltaall',
            type=optimisation,
            description=_('deltas within internal storage will always be '
                          'recalculated without reusing prior deltas; this will '
                          'likely make execution run several times slower; this '
                          'optimization is typically not needed'),
            upgrademessage=_('deltas within internal storage will be fully '
                             'recomputed; this will likely drastically slow down '
                             'execution time')),
    ])

    return found
267
267
def determineactions(repo, improvements, sourcereqs, destreqs,
                     optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``findimprovements``,
    compute the list of upgrade actions to perform: filter improvements
    where needed and fold in the explicitly requested optimizations.

    Returns a list of action names.
    """
    knownreqs = supporteddestrequirements(repo)

    newactions = []
    for imp in improvements:
        name = imp.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        # Deficiencies are always acted upon.
        if imp.type == deficiency:
            newactions.append(name)

    # Requested optimizations follow, deduplicated against what is already
    # scheduled.
    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
302
302
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        # Strip the trailing index name to recover the (tree) manifest dir.
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    # Filelogs don't do anything special with settings. So we can use a
    # vanilla revlog.
    return revlog.revlog(repo.svfs, path)
317
317
def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos.

    Walks every revlog in ``srcrepo``'s store, clones it into ``dstrepo``
    under transaction ``tr``, and prints per-class (filelog/manifest/
    changelog) and total migration statistics. ``deltareuse`` and
    ``aggressivemergedeltas`` are forwarded to ``revlog.clone()``.
    """
    # Aggregate counters across all revlogs, plus per-class counters:
    # f* = filelogs, m* = manifests, c* = changelog.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            # Data files are handled via their '.i' index companion.
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            # Index entry fields: e[1] = compressed (stored) size,
            # e[2] = raw (uncompressed) size.
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        # Nothing to migrate.
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress. Mutated in place (list, not tuple) so
    # the closure below and the per-class branches can update it:
    # [topic, completed count, total count].
    progress = []
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    #
    # ``seen`` tracks which class banner has been printed. Store walk order
    # yields filelogs, then manifests, then the changelog, so each branch
    # also prints the "finished" summary for the preceding class.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        # Measure the size of the freshly written revlog.
        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
458
458
459 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
459 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
460 """Determine whether to copy a store file during upgrade.
460 """Determine whether to copy a store file during upgrade.
461
461
462 This function is called when migrating store files from ``srcrepo`` to
462 This function is called when migrating store files from ``srcrepo`` to
463 ``dstrepo`` as part of upgrading a repository.
463 ``dstrepo`` as part of upgrading a repository.
464
464
465 Args:
465 Args:
466 srcrepo: repo we are copying from
466 srcrepo: repo we are copying from
467 dstrepo: repo we are copying to
467 dstrepo: repo we are copying to
468 requirements: set of requirements for ``dstrepo``
468 requirements: set of requirements for ``dstrepo``
469 path: store file being examined
469 path: store file being examined
470 mode: the ``ST_MODE`` file type of ``path``
470 mode: the ``ST_MODE`` file type of ``path``
471 st: ``stat`` data structure for ``path``
471 st: ``stat`` data structure for ``path``
472
472
473 Function should return ``True`` if the file is to be copied.
473 Function should return ``True`` if the file is to be copied.
474 """
474 """
475 # Skip revlogs.
475 # Skip revlogs.
476 if path.endswith(('.i', '.d')):
476 if path.endswith(('.i', '.d')):
477 return False
477 return False
478 # Skip transaction related files.
478 # Skip transaction related files.
479 if path.startswith('undo'):
479 if path.startswith('undo'):
480 return False
480 return False
481 # Only copy regular files.
481 # Only copy regular files.
482 if mode != stat.S_IFREG:
482 if mode != stat.S_IFREG:
483 return False
483 return False
484 # Skip other skipped files.
484 # Skip other skipped files.
485 if path in ('lock', 'fncache'):
485 if path in ('lock', 'fncache'):
486 return False
486 return False
487
487
488 return True
488 return True
489
489
490 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
490 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
491 """Hook point for extensions to perform additional actions during upgrade.
491 """Hook point for extensions to perform additional actions during upgrade.
492
492
493 This function is called after revlogs and store files have been copied but
493 This function is called after revlogs and store files have been copied but
494 before the new store is swapped into the original location.
494 before the new store is swapped into the original location.
495 """
495 """
496
496
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup copy of the original store.
    """
    # Both repositories must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Translate the requested re-delta action into a revlog delta reuse
    # policy. 'redeltaparent' and 'redeltamultibase' map to the same
    # policy; the multi-base variant is additionally passed to
    # _copyrevlogs below.
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions or 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _filterstorefile(srcrepo, dstrepo, requirements,
                                    p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

    # Give extensions a chance to act before we swap stores.
    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
584
584
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    Without ``run`` this only reports what an upgrade would change; with
    ``run`` the data is migrated into a temporary repository and the new
    store is swapped into place. ``optimize`` names extra optimization
    actions to perform during migration.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded: all required source
    # requirements must be present...
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    # ... and none of the blocking requirements may be present.
    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    # The upgrade may not silently drop a requirement we don't know how
    # to remove...
    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # ... nor add one we don't know how to introduce ...
    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    # ... nor target a requirement the destination cannot support.
    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = findimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = determineactions(repo, improvements, repo.requirements,
                               newreqs, optimize)

    def printrequirements():
        # Summarize how the requirements file will change.
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # Emit the upgrade message of every selected action.
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        # Dry-run mode: classify the known improvements, then describe
        # what "--run" would do without touching anything.
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            # A deficiency may be flagged both by the current config and
            # by the defaults; report it only once, under config.
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        # Advertise optimizations that were not selected.
        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is
            # easier to create a new repo object than to instantiate all the
            # components (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now