##// END OF EJS Templates
upgrade: start moving the "to be happening" data in a dedicated object...
marmoute -
r46671:74923cb8 default draft
parent child Browse files
Show More
@@ -1,320 +1,324 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 hg,
13 hg,
14 localrepo,
14 localrepo,
15 pycompat,
15 pycompat,
16 )
16 )
17
17
18 from .upgrade_utils import (
18 from .upgrade_utils import (
19 actions as upgrade_actions,
19 actions as upgrade_actions,
20 engine as upgrade_engine,
20 engine as upgrade_engine,
21 )
21 )
22
22
23 allformatvariant = upgrade_actions.allformatvariant
23 allformatvariant = upgrade_actions.allformatvariant
24
24
25 # search without '-' to support older form on newer client.
25 # search without '-' to support older form on newer client.
26 #
26 #
27 # We don't enforce backward compatibility for debug command so this
27 # We don't enforce backward compatibility for debug command so this
28 # might eventually be dropped. However, having to use two different
28 # might eventually be dropped. However, having to use two different
29 # forms in script when comparing results is annoying enough to add
29 # forms in script when comparing results is annoying enough to add
30 # backward compatibility for a while.
30 # backward compatibility for a while.
31 legacy_opts_map = {
31 legacy_opts_map = {
32 b'redeltaparent': b're-delta-parent',
32 b'redeltaparent': b're-delta-parent',
33 b'redeltamultibase': b're-delta-multibase',
33 b'redeltamultibase': b're-delta-multibase',
34 b'redeltaall': b're-delta-all',
34 b'redeltaall': b're-delta-all',
35 b'redeltafulladd': b're-delta-fulladd',
35 b'redeltafulladd': b're-delta-fulladd',
36 }
36 }
37
37
38
38
39 def upgraderepo(
39 def upgraderepo(
40 ui,
40 ui,
41 repo,
41 repo,
42 run=False,
42 run=False,
43 optimize=None,
43 optimize=None,
44 backup=True,
44 backup=True,
45 manifest=None,
45 manifest=None,
46 changelog=None,
46 changelog=None,
47 filelogs=None,
47 filelogs=None,
48 ):
48 ):
49 """Upgrade a repository in place."""
49 """Upgrade a repository in place."""
50 if optimize is None:
50 if optimize is None:
51 optimize = []
51 optimize = []
52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
53 repo = repo.unfiltered()
53 repo = repo.unfiltered()
54
54
55 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
55 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
56 specentries = (
56 specentries = (
57 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
57 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
58 (upgrade_engine.UPGRADE_MANIFEST, manifest),
58 (upgrade_engine.UPGRADE_MANIFEST, manifest),
59 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
59 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
60 )
60 )
61 specified = [(y, x) for (y, x) in specentries if x is not None]
61 specified = [(y, x) for (y, x) in specentries if x is not None]
62 if specified:
62 if specified:
63 # we have some limitation on revlogs to be recloned
63 # we have some limitation on revlogs to be recloned
64 if any(x for y, x in specified):
64 if any(x for y, x in specified):
65 revlogs = set()
65 revlogs = set()
66 for upgrade, enabled in specified:
66 for upgrade, enabled in specified:
67 if enabled:
67 if enabled:
68 revlogs.add(upgrade)
68 revlogs.add(upgrade)
69 else:
69 else:
70 # none are enabled
70 # none are enabled
71 for upgrade, __ in specified:
71 for upgrade, __ in specified:
72 revlogs.discard(upgrade)
72 revlogs.discard(upgrade)
73
73
74 # Ensure the repository can be upgraded.
74 # Ensure the repository can be upgraded.
75 upgrade_actions.check_source_requirements(repo)
75 upgrade_actions.check_source_requirements(repo)
76
76
77 default_options = localrepo.defaultcreateopts(repo.ui)
77 default_options = localrepo.defaultcreateopts(repo.ui)
78 newreqs = localrepo.newreporequirements(repo.ui, default_options)
78 newreqs = localrepo.newreporequirements(repo.ui, default_options)
79 newreqs.update(upgrade_actions.preservedrequirements(repo))
79 newreqs.update(upgrade_actions.preservedrequirements(repo))
80
80
81 upgrade_actions.check_requirements_changes(repo, newreqs)
81 upgrade_actions.check_requirements_changes(repo, newreqs)
82
82
83 # Find and validate all improvements that can be made.
83 # Find and validate all improvements that can be made.
84 alloptimizations = upgrade_actions.findoptimizations(repo)
84 alloptimizations = upgrade_actions.findoptimizations(repo)
85
85
86 # Apply and Validate arguments.
86 # Apply and Validate arguments.
87 optimizations = []
87 optimizations = []
88 for o in alloptimizations:
88 for o in alloptimizations:
89 if o.name in optimize:
89 if o.name in optimize:
90 optimizations.append(o)
90 optimizations.append(o)
91 optimize.discard(o.name)
91 optimize.discard(o.name)
92
92
93 if optimize: # anything left is unknown
93 if optimize: # anything left is unknown
94 raise error.Abort(
94 raise error.Abort(
95 _(b'unknown optimization action requested: %s')
95 _(b'unknown optimization action requested: %s')
96 % b', '.join(sorted(optimize)),
96 % b', '.join(sorted(optimize)),
97 hint=_(b'run without arguments to see valid optimizations'),
97 hint=_(b'run without arguments to see valid optimizations'),
98 )
98 )
99
99
100 deficiencies = upgrade_actions.finddeficiencies(repo)
100 deficiencies = upgrade_actions.finddeficiencies(repo)
101 actions = upgrade_actions.determineactions(
101 actions = upgrade_actions.determineactions(
102 repo, deficiencies, repo.requirements, newreqs
102 repo, deficiencies, repo.requirements, newreqs
103 )
103 )
104 actions.extend(
104 actions.extend(
105 o
105 o
106 for o in sorted(optimizations)
106 for o in sorted(optimizations)
107 # determineactions could have added optimisation
107 # determineactions could have added optimisation
108 if o not in actions
108 if o not in actions
109 )
109 )
110
110
111 removedreqs = repo.requirements - newreqs
111 removedreqs = repo.requirements - newreqs
112 addedreqs = newreqs - repo.requirements
112 addedreqs = newreqs - repo.requirements
113
113
114 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
114 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
115 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
115 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
116 removedreqs | addedreqs
116 removedreqs | addedreqs
117 )
117 )
118 if incompatible:
118 if incompatible:
119 msg = _(
119 msg = _(
120 b'ignoring revlogs selection flags, format requirements '
120 b'ignoring revlogs selection flags, format requirements '
121 b'change: %s\n'
121 b'change: %s\n'
122 )
122 )
123 ui.warn(msg % b', '.join(sorted(incompatible)))
123 ui.warn(msg % b', '.join(sorted(incompatible)))
124 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
124 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
125
125
126 def write_labeled(l, label):
126 def write_labeled(l, label):
127 first = True
127 first = True
128 for r in sorted(l):
128 for r in sorted(l):
129 if not first:
129 if not first:
130 ui.write(b', ')
130 ui.write(b', ')
131 ui.write(r, label=label)
131 ui.write(r, label=label)
132 first = False
132 first = False
133
133
134 def printrequirements():
134 def printrequirements():
135 ui.write(_(b'requirements\n'))
135 ui.write(_(b'requirements\n'))
136 ui.write(_(b' preserved: '))
136 ui.write(_(b' preserved: '))
137 write_labeled(
137 write_labeled(
138 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
138 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
139 )
139 )
140 ui.write((b'\n'))
140 ui.write((b'\n'))
141 removed = repo.requirements - newreqs
141 removed = repo.requirements - newreqs
142 if repo.requirements - newreqs:
142 if repo.requirements - newreqs:
143 ui.write(_(b' removed: '))
143 ui.write(_(b' removed: '))
144 write_labeled(removed, "upgrade-repo.requirement.removed")
144 write_labeled(removed, "upgrade-repo.requirement.removed")
145 ui.write((b'\n'))
145 ui.write((b'\n'))
146 added = newreqs - repo.requirements
146 added = newreqs - repo.requirements
147 if added:
147 if added:
148 ui.write(_(b' added: '))
148 ui.write(_(b' added: '))
149 write_labeled(added, "upgrade-repo.requirement.added")
149 write_labeled(added, "upgrade-repo.requirement.added")
150 ui.write((b'\n'))
150 ui.write((b'\n'))
151 ui.write(b'\n')
151 ui.write(b'\n')
152
152
153 def printoptimisations():
153 def printoptimisations():
154 optimisations = [
154 optimisations = [
155 a for a in actions if a.type == upgrade_actions.OPTIMISATION
155 a for a in actions if a.type == upgrade_actions.OPTIMISATION
156 ]
156 ]
157 optimisations.sort(key=lambda a: a.name)
157 optimisations.sort(key=lambda a: a.name)
158 if optimisations:
158 if optimisations:
159 ui.write(_(b'optimisations: '))
159 ui.write(_(b'optimisations: '))
160 write_labeled(
160 write_labeled(
161 [a.name for a in optimisations],
161 [a.name for a in optimisations],
162 "upgrade-repo.optimisation.performed",
162 "upgrade-repo.optimisation.performed",
163 )
163 )
164 ui.write(b'\n\n')
164 ui.write(b'\n\n')
165
165
166 def printupgradeactions():
166 def printupgradeactions():
167 for a in actions:
167 for a in actions:
168 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
168 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
169
169
170 def print_affected_revlogs():
170 def print_affected_revlogs():
171 if not revlogs:
171 if not revlogs:
172 ui.write((b'no revlogs to process\n'))
172 ui.write((b'no revlogs to process\n'))
173 else:
173 else:
174 ui.write((b'processed revlogs:\n'))
174 ui.write((b'processed revlogs:\n'))
175 for r in sorted(revlogs):
175 for r in sorted(revlogs):
176 ui.write((b' - %s\n' % r))
176 ui.write((b' - %s\n' % r))
177 ui.write((b'\n'))
177 ui.write((b'\n'))
178
178
179 upgrade_op = upgrade_actions.UpgradeOperation(
180 newreqs,
181 [a.name for a in actions],
182 revlogs,
183 )
184
179 if not run:
185 if not run:
180 fromconfig = []
186 fromconfig = []
181 onlydefault = []
187 onlydefault = []
182
188
183 for d in deficiencies:
189 for d in deficiencies:
184 if d.fromconfig(repo):
190 if d.fromconfig(repo):
185 fromconfig.append(d)
191 fromconfig.append(d)
186 elif d.default:
192 elif d.default:
187 onlydefault.append(d)
193 onlydefault.append(d)
188
194
189 if fromconfig or onlydefault:
195 if fromconfig or onlydefault:
190
196
191 if fromconfig:
197 if fromconfig:
192 ui.status(
198 ui.status(
193 _(
199 _(
194 b'repository lacks features recommended by '
200 b'repository lacks features recommended by '
195 b'current config options:\n\n'
201 b'current config options:\n\n'
196 )
202 )
197 )
203 )
198 for i in fromconfig:
204 for i in fromconfig:
199 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
205 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
200
206
201 if onlydefault:
207 if onlydefault:
202 ui.status(
208 ui.status(
203 _(
209 _(
204 b'repository lacks features used by the default '
210 b'repository lacks features used by the default '
205 b'config options:\n\n'
211 b'config options:\n\n'
206 )
212 )
207 )
213 )
208 for i in onlydefault:
214 for i in onlydefault:
209 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
215 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
210
216
211 ui.status(b'\n')
217 ui.status(b'\n')
212 else:
218 else:
213 ui.status(
219 ui.status(
214 _(
220 _(
215 b'(no feature deficiencies found in existing '
221 b'(no feature deficiencies found in existing '
216 b'repository)\n'
222 b'repository)\n'
217 )
223 )
218 )
224 )
219
225
220 ui.status(
226 ui.status(
221 _(
227 _(
222 b'performing an upgrade with "--run" will make the following '
228 b'performing an upgrade with "--run" will make the following '
223 b'changes:\n\n'
229 b'changes:\n\n'
224 )
230 )
225 )
231 )
226
232
227 printrequirements()
233 printrequirements()
228 printoptimisations()
234 printoptimisations()
229 printupgradeactions()
235 printupgradeactions()
230 print_affected_revlogs()
236 print_affected_revlogs()
231
237
232 unusedoptimize = [i for i in alloptimizations if i not in actions]
238 unusedoptimize = [i for i in alloptimizations if i not in actions]
233
239
234 if unusedoptimize:
240 if unusedoptimize:
235 ui.status(
241 ui.status(
236 _(
242 _(
237 b'additional optimizations are available by specifying '
243 b'additional optimizations are available by specifying '
238 b'"--optimize <name>":\n\n'
244 b'"--optimize <name>":\n\n'
239 )
245 )
240 )
246 )
241 for i in unusedoptimize:
247 for i in unusedoptimize:
242 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
248 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
243 return
249 return
244
250
245 # Else we're in the run=true case.
251 # Else we're in the run=true case.
246 ui.write(_(b'upgrade will perform the following actions:\n\n'))
252 ui.write(_(b'upgrade will perform the following actions:\n\n'))
247 printrequirements()
253 printrequirements()
248 printoptimisations()
254 printoptimisations()
249 printupgradeactions()
255 printupgradeactions()
250 print_affected_revlogs()
256 print_affected_revlogs()
251
257
252 upgradeactions = [a.name for a in actions]
253
254 ui.status(_(b'beginning upgrade...\n'))
258 ui.status(_(b'beginning upgrade...\n'))
255 with repo.wlock(), repo.lock():
259 with repo.wlock(), repo.lock():
256 ui.status(_(b'repository locked and read-only\n'))
260 ui.status(_(b'repository locked and read-only\n'))
257 # Our strategy for upgrading the repository is to create a new,
261 # Our strategy for upgrading the repository is to create a new,
258 # temporary repository, write data to it, then do a swap of the
262 # temporary repository, write data to it, then do a swap of the
259 # data. There are less heavyweight ways to do this, but it is easier
263 # data. There are less heavyweight ways to do this, but it is easier
260 # to create a new repo object than to instantiate all the components
264 # to create a new repo object than to instantiate all the components
261 # (like the store) separately.
265 # (like the store) separately.
262 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
266 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
263 backuppath = None
267 backuppath = None
264 try:
268 try:
265 ui.status(
269 ui.status(
266 _(
270 _(
267 b'creating temporary repository to stage migrated '
271 b'creating temporary repository to stage migrated '
268 b'data: %s\n'
272 b'data: %s\n'
269 )
273 )
270 % tmppath
274 % tmppath
271 )
275 )
272
276
273 # clone ui without using ui.copy because repo.ui is protected
277 # clone ui without using ui.copy because repo.ui is protected
274 repoui = repo.ui.__class__(repo.ui)
278 repoui = repo.ui.__class__(repo.ui)
275 dstrepo = hg.repository(repoui, path=tmppath, create=True)
279 dstrepo = hg.repository(repoui, path=tmppath, create=True)
276
280
277 with dstrepo.wlock(), dstrepo.lock():
281 with dstrepo.wlock(), dstrepo.lock():
278 backuppath = upgrade_engine.upgrade(
282 backuppath = upgrade_engine.upgrade(
279 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
283 ui, repo, dstrepo, upgrade_op
280 )
284 )
281 if not (backup or backuppath is None):
285 if not (backup or backuppath is None):
282 ui.status(
286 ui.status(
283 _(b'removing old repository content%s\n') % backuppath
287 _(b'removing old repository content%s\n') % backuppath
284 )
288 )
285 repo.vfs.rmtree(backuppath, forcibly=True)
289 repo.vfs.rmtree(backuppath, forcibly=True)
286 backuppath = None
290 backuppath = None
287
291
288 finally:
292 finally:
289 ui.status(_(b'removing temporary repository %s\n') % tmppath)
293 ui.status(_(b'removing temporary repository %s\n') % tmppath)
290 repo.vfs.rmtree(tmppath, forcibly=True)
294 repo.vfs.rmtree(tmppath, forcibly=True)
291
295
292 if backuppath and not ui.quiet:
296 if backuppath and not ui.quiet:
293 ui.warn(
297 ui.warn(
294 _(b'copy of old repository backed up at %s\n') % backuppath
298 _(b'copy of old repository backed up at %s\n') % backuppath
295 )
299 )
296 ui.warn(
300 ui.warn(
297 _(
301 _(
298 b'the old repository will not be deleted; remove '
302 b'the old repository will not be deleted; remove '
299 b'it to free up disk space once the upgraded '
303 b'it to free up disk space once the upgraded '
300 b'repository is verified\n'
304 b'repository is verified\n'
301 )
305 )
302 )
306 )
303
307
304 if upgrade_actions.sharesafe.name in addedreqs:
308 if upgrade_actions.sharesafe.name in addedreqs:
305 ui.warn(
309 ui.warn(
306 _(
310 _(
307 b'repository upgraded to share safe mode, existing'
311 b'repository upgraded to share safe mode, existing'
308 b' shares will still work in old non-safe mode. '
312 b' shares will still work in old non-safe mode. '
309 b'Re-share existing shares to use them in safe mode'
313 b'Re-share existing shares to use them in safe mode'
310 b' New shares will be created in safe mode.\n'
314 b' New shares will be created in safe mode.\n'
311 )
315 )
312 )
316 )
313 if upgrade_actions.sharesafe.name in removedreqs:
317 if upgrade_actions.sharesafe.name in removedreqs:
314 ui.warn(
318 ui.warn(
315 _(
319 _(
316 b'repository downgraded to not use share safe mode, '
320 b'repository downgraded to not use share safe mode, '
317 b'existing shares will not work and needs to'
321 b'existing shares will not work and needs to'
318 b' be reshared.\n'
322 b' be reshared.\n'
319 )
323 )
320 )
324 )
@@ -1,719 +1,728 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 requirements,
14 requirements,
15 util,
15 util,
16 )
16 )
17
17
18 from ..utils import compression
18 from ..utils import compression
19
19
20 # list of requirements that request a clone of all revlog if added/removed
20 # list of requirements that request a clone of all revlog if added/removed
21 RECLONES_REQUIREMENTS = {
21 RECLONES_REQUIREMENTS = {
22 b'generaldelta',
22 b'generaldelta',
23 requirements.SPARSEREVLOG_REQUIREMENT,
23 requirements.SPARSEREVLOG_REQUIREMENT,
24 }
24 }
25
25
26
26
27 def preservedrequirements(repo):
27 def preservedrequirements(repo):
28 return set()
28 return set()
29
29
30
30
31 DEFICIENCY = b'deficiency'
31 DEFICIENCY = b'deficiency'
32 OPTIMISATION = b'optimization'
32 OPTIMISATION = b'optimization'
33
33
34
34
35 class improvement(object):
35 class improvement(object):
36 """Represents an improvement that can be made as part of an upgrade.
36 """Represents an improvement that can be made as part of an upgrade.
37
37
38 The following attributes are defined on each instance:
38 The following attributes are defined on each instance:
39
39
40 name
40 name
41 Machine-readable string uniquely identifying this improvement. It
41 Machine-readable string uniquely identifying this improvement. It
42 will be mapped to an action later in the upgrade process.
42 will be mapped to an action later in the upgrade process.
43
43
44 type
44 type
45 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
45 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
46 problem. An optimization is an action (sometimes optional) that
46 problem. An optimization is an action (sometimes optional) that
47 can be taken to further improve the state of the repository.
47 can be taken to further improve the state of the repository.
48
48
49 description
49 description
50 Message intended for humans explaining the improvement in more detail,
50 Message intended for humans explaining the improvement in more detail,
51 including the implications of it. For ``DEFICIENCY`` types, should be
51 including the implications of it. For ``DEFICIENCY`` types, should be
52 worded in the present tense. For ``OPTIMISATION`` types, should be
52 worded in the present tense. For ``OPTIMISATION`` types, should be
53 worded in the future tense.
53 worded in the future tense.
54
54
55 upgrademessage
55 upgrademessage
56 Message intended for humans explaining what an upgrade addressing this
56 Message intended for humans explaining what an upgrade addressing this
57 issue will do. Should be worded in the future tense.
57 issue will do. Should be worded in the future tense.
58 """
58 """
59
59
60 def __init__(self, name, type, description, upgrademessage):
60 def __init__(self, name, type, description, upgrademessage):
61 self.name = name
61 self.name = name
62 self.type = type
62 self.type = type
63 self.description = description
63 self.description = description
64 self.upgrademessage = upgrademessage
64 self.upgrademessage = upgrademessage
65
65
66 def __eq__(self, other):
66 def __eq__(self, other):
67 if not isinstance(other, improvement):
67 if not isinstance(other, improvement):
68 # This is what Python tells us to do
68 # This is what Python tells us to do
69 return NotImplemented
69 return NotImplemented
70 return self.name == other.name
70 return self.name == other.name
71
71
72 def __ne__(self, other):
72 def __ne__(self, other):
73 return not (self == other)
73 return not (self == other)
74
74
75 def __hash__(self):
75 def __hash__(self):
76 return hash(self.name)
76 return hash(self.name)
77
77
78
78
79 allformatvariant = []
79 allformatvariant = []
80
80
81
81
82 def registerformatvariant(cls):
82 def registerformatvariant(cls):
83 allformatvariant.append(cls)
83 allformatvariant.append(cls)
84 return cls
84 return cls
85
85
86
86
87 class formatvariant(improvement):
87 class formatvariant(improvement):
88 """an improvement subclass dedicated to repository format"""
88 """an improvement subclass dedicated to repository format"""
89
89
90 type = DEFICIENCY
90 type = DEFICIENCY
91 ### The following attributes should be defined for each class:
91 ### The following attributes should be defined for each class:
92
92
93 # machine-readable string uniquely identifying this improvement. it will be
93 # machine-readable string uniquely identifying this improvement. it will be
94 # mapped to an action later in the upgrade process.
94 # mapped to an action later in the upgrade process.
95 name = None
95 name = None
96
96
97 # message intended for humans explaining the improvement in more detail,
97 # message intended for humans explaining the improvement in more detail,
98 # including the implications of it ``DEFICIENCY`` types, should be worded
98 # including the implications of it ``DEFICIENCY`` types, should be worded
99 # in the present tense.
99 # in the present tense.
100 description = None
100 description = None
101
101
102 # message intended for humans explaining what an upgrade addressing this
102 # message intended for humans explaining what an upgrade addressing this
103 # issue will do. should be worded in the future tense.
103 # issue will do. should be worded in the future tense.
104 upgrademessage = None
104 upgrademessage = None
105
105
106 # value of current Mercurial default for new repository
106 # value of current Mercurial default for new repository
107 default = None
107 default = None
108
108
109 def __init__(self):
109 def __init__(self):
110 raise NotImplementedError()
110 raise NotImplementedError()
111
111
112 @staticmethod
112 @staticmethod
113 def fromrepo(repo):
113 def fromrepo(repo):
114 """current value of the variant in the repository"""
114 """current value of the variant in the repository"""
115 raise NotImplementedError()
115 raise NotImplementedError()
116
116
117 @staticmethod
117 @staticmethod
118 def fromconfig(repo):
118 def fromconfig(repo):
119 """current value of the variant in the configuration"""
119 """current value of the variant in the configuration"""
120 raise NotImplementedError()
120 raise NotImplementedError()
121
121
122
122
123 class requirementformatvariant(formatvariant):
123 class requirementformatvariant(formatvariant):
124 """formatvariant based on a 'requirement' name.
124 """formatvariant based on a 'requirement' name.
125
125
126 Many format variants are controlled by a 'requirement'. We define a small
126 Many format variants are controlled by a 'requirement'. We define a small
127 subclass to factor the code.
127 subclass to factor the code.
128 """
128 """
129
129
130 # the requirement that control this format variant
130 # the requirement that control this format variant
131 _requirement = None
131 _requirement = None
132
132
133 @staticmethod
133 @staticmethod
134 def _newreporequirements(ui):
134 def _newreporequirements(ui):
135 return localrepo.newreporequirements(
135 return localrepo.newreporequirements(
136 ui, localrepo.defaultcreateopts(ui)
136 ui, localrepo.defaultcreateopts(ui)
137 )
137 )
138
138
139 @classmethod
139 @classmethod
140 def fromrepo(cls, repo):
140 def fromrepo(cls, repo):
141 assert cls._requirement is not None
141 assert cls._requirement is not None
142 return cls._requirement in repo.requirements
142 return cls._requirement in repo.requirements
143
143
144 @classmethod
144 @classmethod
145 def fromconfig(cls, repo):
145 def fromconfig(cls, repo):
146 assert cls._requirement is not None
146 assert cls._requirement is not None
147 return cls._requirement in cls._newreporequirements(repo.ui)
147 return cls._requirement in cls._newreporequirements(repo.ui)
148
148
149
149
150 @registerformatvariant
150 @registerformatvariant
151 class fncache(requirementformatvariant):
151 class fncache(requirementformatvariant):
152 name = b'fncache'
152 name = b'fncache'
153
153
154 _requirement = b'fncache'
154 _requirement = b'fncache'
155
155
156 default = True
156 default = True
157
157
158 description = _(
158 description = _(
159 b'long and reserved filenames may not work correctly; '
159 b'long and reserved filenames may not work correctly; '
160 b'repository performance is sub-optimal'
160 b'repository performance is sub-optimal'
161 )
161 )
162
162
163 upgrademessage = _(
163 upgrademessage = _(
164 b'repository will be more resilient to storing '
164 b'repository will be more resilient to storing '
165 b'certain paths and performance of certain '
165 b'certain paths and performance of certain '
166 b'operations should be improved'
166 b'operations should be improved'
167 )
167 )
168
168
169
169
170 @registerformatvariant
170 @registerformatvariant
171 class dotencode(requirementformatvariant):
171 class dotencode(requirementformatvariant):
172 name = b'dotencode'
172 name = b'dotencode'
173
173
174 _requirement = b'dotencode'
174 _requirement = b'dotencode'
175
175
176 default = True
176 default = True
177
177
178 description = _(
178 description = _(
179 b'storage of filenames beginning with a period or '
179 b'storage of filenames beginning with a period or '
180 b'space may not work correctly'
180 b'space may not work correctly'
181 )
181 )
182
182
183 upgrademessage = _(
183 upgrademessage = _(
184 b'repository will be better able to store files '
184 b'repository will be better able to store files '
185 b'beginning with a space or period'
185 b'beginning with a space or period'
186 )
186 )
187
187
188
188
189 @registerformatvariant
189 @registerformatvariant
190 class generaldelta(requirementformatvariant):
190 class generaldelta(requirementformatvariant):
191 name = b'generaldelta'
191 name = b'generaldelta'
192
192
193 _requirement = b'generaldelta'
193 _requirement = b'generaldelta'
194
194
195 default = True
195 default = True
196
196
197 description = _(
197 description = _(
198 b'deltas within internal storage are unable to '
198 b'deltas within internal storage are unable to '
199 b'choose optimal revisions; repository is larger and '
199 b'choose optimal revisions; repository is larger and '
200 b'slower than it could be; interaction with other '
200 b'slower than it could be; interaction with other '
201 b'repositories may require extra network and CPU '
201 b'repositories may require extra network and CPU '
202 b'resources, making "hg push" and "hg pull" slower'
202 b'resources, making "hg push" and "hg pull" slower'
203 )
203 )
204
204
205 upgrademessage = _(
205 upgrademessage = _(
206 b'repository storage will be able to create '
206 b'repository storage will be able to create '
207 b'optimal deltas; new repository data will be '
207 b'optimal deltas; new repository data will be '
208 b'smaller and read times should decrease; '
208 b'smaller and read times should decrease; '
209 b'interacting with other repositories using this '
209 b'interacting with other repositories using this '
210 b'storage model should require less network and '
210 b'storage model should require less network and '
211 b'CPU resources, making "hg push" and "hg pull" '
211 b'CPU resources, making "hg push" and "hg pull" '
212 b'faster'
212 b'faster'
213 )
213 )
214
214
215
215
216 @registerformatvariant
216 @registerformatvariant
217 class sharesafe(requirementformatvariant):
217 class sharesafe(requirementformatvariant):
218 name = b'exp-sharesafe'
218 name = b'exp-sharesafe'
219 _requirement = requirements.SHARESAFE_REQUIREMENT
219 _requirement = requirements.SHARESAFE_REQUIREMENT
220
220
221 default = False
221 default = False
222
222
223 description = _(
223 description = _(
224 b'old shared repositories do not share source repository '
224 b'old shared repositories do not share source repository '
225 b'requirements and config. This leads to various problems '
225 b'requirements and config. This leads to various problems '
226 b'when the source repository format is upgraded or some new '
226 b'when the source repository format is upgraded or some new '
227 b'extensions are enabled.'
227 b'extensions are enabled.'
228 )
228 )
229
229
230 upgrademessage = _(
230 upgrademessage = _(
231 b'Upgrades a repository to share-safe format so that future '
231 b'Upgrades a repository to share-safe format so that future '
232 b'shares of this repository share its requirements and configs.'
232 b'shares of this repository share its requirements and configs.'
233 )
233 )
234
234
235
235
236 @registerformatvariant
236 @registerformatvariant
237 class sparserevlog(requirementformatvariant):
237 class sparserevlog(requirementformatvariant):
238 name = b'sparserevlog'
238 name = b'sparserevlog'
239
239
240 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
240 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
241
241
242 default = True
242 default = True
243
243
244 description = _(
244 description = _(
245 b'in order to limit disk reading and memory usage on older '
245 b'in order to limit disk reading and memory usage on older '
246 b'version, the span of a delta chain from its root to its '
246 b'version, the span of a delta chain from its root to its '
247 b'end is limited, whatever the relevant data in this span. '
247 b'end is limited, whatever the relevant data in this span. '
248 b'This can severly limit Mercurial ability to build good '
248 b'This can severly limit Mercurial ability to build good '
249 b'chain of delta resulting is much more storage space being '
249 b'chain of delta resulting is much more storage space being '
250 b'taken and limit reusability of on disk delta during '
250 b'taken and limit reusability of on disk delta during '
251 b'exchange.'
251 b'exchange.'
252 )
252 )
253
253
254 upgrademessage = _(
254 upgrademessage = _(
255 b'Revlog supports delta chain with more unused data '
255 b'Revlog supports delta chain with more unused data '
256 b'between payload. These gaps will be skipped at read '
256 b'between payload. These gaps will be skipped at read '
257 b'time. This allows for better delta chains, making a '
257 b'time. This allows for better delta chains, making a '
258 b'better compression and faster exchange with server.'
258 b'better compression and faster exchange with server.'
259 )
259 )
260
260
261
261
262 @registerformatvariant
262 @registerformatvariant
263 class sidedata(requirementformatvariant):
263 class sidedata(requirementformatvariant):
264 name = b'sidedata'
264 name = b'sidedata'
265
265
266 _requirement = requirements.SIDEDATA_REQUIREMENT
266 _requirement = requirements.SIDEDATA_REQUIREMENT
267
267
268 default = False
268 default = False
269
269
270 description = _(
270 description = _(
271 b'Allows storage of extra data alongside a revision, '
271 b'Allows storage of extra data alongside a revision, '
272 b'unlocking various caching options.'
272 b'unlocking various caching options.'
273 )
273 )
274
274
275 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
275 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
276
276
277
277
278 @registerformatvariant
278 @registerformatvariant
279 class persistentnodemap(requirementformatvariant):
279 class persistentnodemap(requirementformatvariant):
280 name = b'persistent-nodemap'
280 name = b'persistent-nodemap'
281
281
282 _requirement = requirements.NODEMAP_REQUIREMENT
282 _requirement = requirements.NODEMAP_REQUIREMENT
283
283
284 default = False
284 default = False
285
285
286 description = _(
286 description = _(
287 b'persist the node -> rev mapping on disk to speedup lookup'
287 b'persist the node -> rev mapping on disk to speedup lookup'
288 )
288 )
289
289
290 upgrademessage = _(b'Speedup revision lookup by node id.')
290 upgrademessage = _(b'Speedup revision lookup by node id.')
291
291
292
292
293 @registerformatvariant
293 @registerformatvariant
294 class copiessdc(requirementformatvariant):
294 class copiessdc(requirementformatvariant):
295 name = b'copies-sdc'
295 name = b'copies-sdc'
296
296
297 _requirement = requirements.COPIESSDC_REQUIREMENT
297 _requirement = requirements.COPIESSDC_REQUIREMENT
298
298
299 default = False
299 default = False
300
300
301 description = _(b'Stores copies information alongside changesets.')
301 description = _(b'Stores copies information alongside changesets.')
302
302
303 upgrademessage = _(
303 upgrademessage = _(
304 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
304 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
305 )
305 )
306
306
307
307
308 @registerformatvariant
308 @registerformatvariant
309 class removecldeltachain(formatvariant):
309 class removecldeltachain(formatvariant):
310 name = b'plain-cl-delta'
310 name = b'plain-cl-delta'
311
311
312 default = True
312 default = True
313
313
314 description = _(
314 description = _(
315 b'changelog storage is using deltas instead of '
315 b'changelog storage is using deltas instead of '
316 b'raw entries; changelog reading and any '
316 b'raw entries; changelog reading and any '
317 b'operation relying on changelog data are slower '
317 b'operation relying on changelog data are slower '
318 b'than they could be'
318 b'than they could be'
319 )
319 )
320
320
321 upgrademessage = _(
321 upgrademessage = _(
322 b'changelog storage will be reformated to '
322 b'changelog storage will be reformated to '
323 b'store raw entries; changelog reading will be '
323 b'store raw entries; changelog reading will be '
324 b'faster; changelog size may be reduced'
324 b'faster; changelog size may be reduced'
325 )
325 )
326
326
327 @staticmethod
327 @staticmethod
328 def fromrepo(repo):
328 def fromrepo(repo):
329 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
329 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
330 # changelogs with deltas.
330 # changelogs with deltas.
331 cl = repo.changelog
331 cl = repo.changelog
332 chainbase = cl.chainbase
332 chainbase = cl.chainbase
333 return all(rev == chainbase(rev) for rev in cl)
333 return all(rev == chainbase(rev) for rev in cl)
334
334
335 @staticmethod
335 @staticmethod
336 def fromconfig(repo):
336 def fromconfig(repo):
337 return True
337 return True
338
338
339
339
340 @registerformatvariant
340 @registerformatvariant
341 class compressionengine(formatvariant):
341 class compressionengine(formatvariant):
342 name = b'compression'
342 name = b'compression'
343 default = b'zlib'
343 default = b'zlib'
344
344
345 description = _(
345 description = _(
346 b'Compresion algorithm used to compress data. '
346 b'Compresion algorithm used to compress data. '
347 b'Some engine are faster than other'
347 b'Some engine are faster than other'
348 )
348 )
349
349
350 upgrademessage = _(
350 upgrademessage = _(
351 b'revlog content will be recompressed with the new algorithm.'
351 b'revlog content will be recompressed with the new algorithm.'
352 )
352 )
353
353
354 @classmethod
354 @classmethod
355 def fromrepo(cls, repo):
355 def fromrepo(cls, repo):
356 # we allow multiple compression engine requirement to co-exist because
356 # we allow multiple compression engine requirement to co-exist because
357 # strickly speaking, revlog seems to support mixed compression style.
357 # strickly speaking, revlog seems to support mixed compression style.
358 #
358 #
359 # The compression used for new entries will be "the last one"
359 # The compression used for new entries will be "the last one"
360 compression = b'zlib'
360 compression = b'zlib'
361 for req in repo.requirements:
361 for req in repo.requirements:
362 prefix = req.startswith
362 prefix = req.startswith
363 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
363 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
364 compression = req.split(b'-', 2)[2]
364 compression = req.split(b'-', 2)[2]
365 return compression
365 return compression
366
366
367 @classmethod
367 @classmethod
368 def fromconfig(cls, repo):
368 def fromconfig(cls, repo):
369 compengines = repo.ui.configlist(b'format', b'revlog-compression')
369 compengines = repo.ui.configlist(b'format', b'revlog-compression')
370 # return the first valid value as the selection code would do
370 # return the first valid value as the selection code would do
371 for comp in compengines:
371 for comp in compengines:
372 if comp in util.compengines:
372 if comp in util.compengines:
373 return comp
373 return comp
374
374
375 # no valide compression found lets display it all for clarity
375 # no valide compression found lets display it all for clarity
376 return b','.join(compengines)
376 return b','.join(compengines)
377
377
378
378
379 @registerformatvariant
379 @registerformatvariant
380 class compressionlevel(formatvariant):
380 class compressionlevel(formatvariant):
381 name = b'compression-level'
381 name = b'compression-level'
382 default = b'default'
382 default = b'default'
383
383
384 description = _(b'compression level')
384 description = _(b'compression level')
385
385
386 upgrademessage = _(b'revlog content will be recompressed')
386 upgrademessage = _(b'revlog content will be recompressed')
387
387
388 @classmethod
388 @classmethod
389 def fromrepo(cls, repo):
389 def fromrepo(cls, repo):
390 comp = compressionengine.fromrepo(repo)
390 comp = compressionengine.fromrepo(repo)
391 level = None
391 level = None
392 if comp == b'zlib':
392 if comp == b'zlib':
393 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
393 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
394 elif comp == b'zstd':
394 elif comp == b'zstd':
395 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
395 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
396 if level is None:
396 if level is None:
397 return b'default'
397 return b'default'
398 return bytes(level)
398 return bytes(level)
399
399
400 @classmethod
400 @classmethod
401 def fromconfig(cls, repo):
401 def fromconfig(cls, repo):
402 comp = compressionengine.fromconfig(repo)
402 comp = compressionengine.fromconfig(repo)
403 level = None
403 level = None
404 if comp == b'zlib':
404 if comp == b'zlib':
405 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
405 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
406 elif comp == b'zstd':
406 elif comp == b'zstd':
407 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
407 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
408 if level is None:
408 if level is None:
409 return b'default'
409 return b'default'
410 return bytes(level)
410 return bytes(level)
411
411
412
412
413 def finddeficiencies(repo):
413 def finddeficiencies(repo):
414 """returns a list of deficiencies that the repo suffer from"""
414 """returns a list of deficiencies that the repo suffer from"""
415 deficiencies = []
415 deficiencies = []
416
416
417 # We could detect lack of revlogv1 and store here, but they were added
417 # We could detect lack of revlogv1 and store here, but they were added
418 # in 0.9.2 and we don't support upgrading repos without these
418 # in 0.9.2 and we don't support upgrading repos without these
419 # requirements, so let's not bother.
419 # requirements, so let's not bother.
420
420
421 for fv in allformatvariant:
421 for fv in allformatvariant:
422 if not fv.fromrepo(repo):
422 if not fv.fromrepo(repo):
423 deficiencies.append(fv)
423 deficiencies.append(fv)
424
424
425 return deficiencies
425 return deficiencies
426
426
427
427
428 ALL_OPTIMISATIONS = []
428 ALL_OPTIMISATIONS = []
429
429
430
430
431 def register_optimization(obj):
431 def register_optimization(obj):
432 ALL_OPTIMISATIONS.append(obj)
432 ALL_OPTIMISATIONS.append(obj)
433 return obj
433 return obj
434
434
435
435
436 register_optimization(
436 register_optimization(
437 improvement(
437 improvement(
438 name=b're-delta-parent',
438 name=b're-delta-parent',
439 type=OPTIMISATION,
439 type=OPTIMISATION,
440 description=_(
440 description=_(
441 b'deltas within internal storage will be recalculated to '
441 b'deltas within internal storage will be recalculated to '
442 b'choose an optimal base revision where this was not '
442 b'choose an optimal base revision where this was not '
443 b'already done; the size of the repository may shrink and '
443 b'already done; the size of the repository may shrink and '
444 b'various operations may become faster; the first time '
444 b'various operations may become faster; the first time '
445 b'this optimization is performed could slow down upgrade '
445 b'this optimization is performed could slow down upgrade '
446 b'execution considerably; subsequent invocations should '
446 b'execution considerably; subsequent invocations should '
447 b'not run noticeably slower'
447 b'not run noticeably slower'
448 ),
448 ),
449 upgrademessage=_(
449 upgrademessage=_(
450 b'deltas within internal storage will choose a new '
450 b'deltas within internal storage will choose a new '
451 b'base revision if needed'
451 b'base revision if needed'
452 ),
452 ),
453 )
453 )
454 )
454 )
455
455
456 register_optimization(
456 register_optimization(
457 improvement(
457 improvement(
458 name=b're-delta-multibase',
458 name=b're-delta-multibase',
459 type=OPTIMISATION,
459 type=OPTIMISATION,
460 description=_(
460 description=_(
461 b'deltas within internal storage will be recalculated '
461 b'deltas within internal storage will be recalculated '
462 b'against multiple base revision and the smallest '
462 b'against multiple base revision and the smallest '
463 b'difference will be used; the size of the repository may '
463 b'difference will be used; the size of the repository may '
464 b'shrink significantly when there are many merges; this '
464 b'shrink significantly when there are many merges; this '
465 b'optimization will slow down execution in proportion to '
465 b'optimization will slow down execution in proportion to '
466 b'the number of merges in the repository and the amount '
466 b'the number of merges in the repository and the amount '
467 b'of files in the repository; this slow down should not '
467 b'of files in the repository; this slow down should not '
468 b'be significant unless there are tens of thousands of '
468 b'be significant unless there are tens of thousands of '
469 b'files and thousands of merges'
469 b'files and thousands of merges'
470 ),
470 ),
471 upgrademessage=_(
471 upgrademessage=_(
472 b'deltas within internal storage will choose an '
472 b'deltas within internal storage will choose an '
473 b'optimal delta by computing deltas against multiple '
473 b'optimal delta by computing deltas against multiple '
474 b'parents; may slow down execution time '
474 b'parents; may slow down execution time '
475 b'significantly'
475 b'significantly'
476 ),
476 ),
477 )
477 )
478 )
478 )
479
479
480 register_optimization(
480 register_optimization(
481 improvement(
481 improvement(
482 name=b're-delta-all',
482 name=b're-delta-all',
483 type=OPTIMISATION,
483 type=OPTIMISATION,
484 description=_(
484 description=_(
485 b'deltas within internal storage will always be '
485 b'deltas within internal storage will always be '
486 b'recalculated without reusing prior deltas; this will '
486 b'recalculated without reusing prior deltas; this will '
487 b'likely make execution run several times slower; this '
487 b'likely make execution run several times slower; this '
488 b'optimization is typically not needed'
488 b'optimization is typically not needed'
489 ),
489 ),
490 upgrademessage=_(
490 upgrademessage=_(
491 b'deltas within internal storage will be fully '
491 b'deltas within internal storage will be fully '
492 b'recomputed; this will likely drastically slow down '
492 b'recomputed; this will likely drastically slow down '
493 b'execution time'
493 b'execution time'
494 ),
494 ),
495 )
495 )
496 )
496 )
497
497
498 register_optimization(
498 register_optimization(
499 improvement(
499 improvement(
500 name=b're-delta-fulladd',
500 name=b're-delta-fulladd',
501 type=OPTIMISATION,
501 type=OPTIMISATION,
502 description=_(
502 description=_(
503 b'every revision will be re-added as if it was new '
503 b'every revision will be re-added as if it was new '
504 b'content. It will go through the full storage '
504 b'content. It will go through the full storage '
505 b'mechanism giving extensions a chance to process it '
505 b'mechanism giving extensions a chance to process it '
506 b'(eg. lfs). This is similar to "re-delta-all" but even '
506 b'(eg. lfs). This is similar to "re-delta-all" but even '
507 b'slower since more logic is involved.'
507 b'slower since more logic is involved.'
508 ),
508 ),
509 upgrademessage=_(
509 upgrademessage=_(
510 b'each revision will be added as new content to the '
510 b'each revision will be added as new content to the '
511 b'internal storage; this will likely drastically slow '
511 b'internal storage; this will likely drastically slow '
512 b'down execution time, but some extensions might need '
512 b'down execution time, but some extensions might need '
513 b'it'
513 b'it'
514 ),
514 ),
515 )
515 )
516 )
516 )
517
517
518
518
519 def findoptimizations(repo):
519 def findoptimizations(repo):
520 """Determine optimisation that could be used during upgrade"""
520 """Determine optimisation that could be used during upgrade"""
521 # These are unconditionally added. There is logic later that figures out
521 # These are unconditionally added. There is logic later that figures out
522 # which ones to apply.
522 # which ones to apply.
523 return list(ALL_OPTIMISATIONS)
523 return list(ALL_OPTIMISATIONS)
524
524
525
525
526 def determineactions(repo, deficiencies, sourcereqs, destreqs):
526 def determineactions(repo, deficiencies, sourcereqs, destreqs):
527 """Determine upgrade actions that will be performed.
527 """Determine upgrade actions that will be performed.
528
528
529 Given a list of improvements as returned by ``finddeficiencies`` and
529 Given a list of improvements as returned by ``finddeficiencies`` and
530 ``findoptimizations``, determine the list of upgrade actions that
530 ``findoptimizations``, determine the list of upgrade actions that
531 will be performed.
531 will be performed.
532
532
533 The role of this function is to filter improvements if needed, apply
533 The role of this function is to filter improvements if needed, apply
534 recommended optimizations from the improvements list that make sense,
534 recommended optimizations from the improvements list that make sense,
535 etc.
535 etc.
536
536
537 Returns a list of action names.
537 Returns a list of action names.
538 """
538 """
539 newactions = []
539 newactions = []
540
540
541 for d in deficiencies:
541 for d in deficiencies:
542 name = d._requirement
542 name = d._requirement
543
543
544 # If the action is a requirement that doesn't show up in the
544 # If the action is a requirement that doesn't show up in the
545 # destination requirements, prune the action.
545 # destination requirements, prune the action.
546 if name is not None and name not in destreqs:
546 if name is not None and name not in destreqs:
547 continue
547 continue
548
548
549 newactions.append(d)
549 newactions.append(d)
550
550
551 # FUTURE consider adding some optimizations here for certain transitions.
551 # FUTURE consider adding some optimizations here for certain transitions.
552 # e.g. adding generaldelta could schedule parent redeltas.
552 # e.g. adding generaldelta could schedule parent redeltas.
553
553
554 return newactions
554 return newactions
555
555
556
556
557 class UpgradeOperation(object):
558 """represent the work to be done during an upgrade"""
559
560 def __init__(self, requirements, actions, revlogs_to_process):
561 self.requirements = requirements
562 self.actions = actions
563 self.revlogs_to_process = revlogs_to_process
564
565
557 ### Code checking if a repository can got through the upgrade process at all. #
566 ### Code checking if a repository can got through the upgrade process at all. #
558
567
559
568
560 def requiredsourcerequirements(repo):
569 def requiredsourcerequirements(repo):
561 """Obtain requirements required to be present to upgrade a repo.
570 """Obtain requirements required to be present to upgrade a repo.
562
571
563 An upgrade will not be allowed if the repository doesn't have the
572 An upgrade will not be allowed if the repository doesn't have the
564 requirements returned by this function.
573 requirements returned by this function.
565 """
574 """
566 return {
575 return {
567 # Introduced in Mercurial 0.9.2.
576 # Introduced in Mercurial 0.9.2.
568 b'revlogv1',
577 b'revlogv1',
569 # Introduced in Mercurial 0.9.2.
578 # Introduced in Mercurial 0.9.2.
570 b'store',
579 b'store',
571 }
580 }
572
581
573
582
574 def blocksourcerequirements(repo):
583 def blocksourcerequirements(repo):
575 """Obtain requirements that will prevent an upgrade from occurring.
584 """Obtain requirements that will prevent an upgrade from occurring.
576
585
577 An upgrade cannot be performed if the source repository contains a
586 An upgrade cannot be performed if the source repository contains a
578 requirements in the returned set.
587 requirements in the returned set.
579 """
588 """
580 return {
589 return {
581 # The upgrade code does not yet support these experimental features.
590 # The upgrade code does not yet support these experimental features.
582 # This is an artificial limitation.
591 # This is an artificial limitation.
583 requirements.TREEMANIFEST_REQUIREMENT,
592 requirements.TREEMANIFEST_REQUIREMENT,
584 # This was a precursor to generaldelta and was never enabled by default.
593 # This was a precursor to generaldelta and was never enabled by default.
585 # It should (hopefully) not exist in the wild.
594 # It should (hopefully) not exist in the wild.
586 b'parentdelta',
595 b'parentdelta',
587 # Upgrade should operate on the actual store, not the shared link.
596 # Upgrade should operate on the actual store, not the shared link.
588 requirements.SHARED_REQUIREMENT,
597 requirements.SHARED_REQUIREMENT,
589 }
598 }
590
599
591
600
592 def check_source_requirements(repo):
601 def check_source_requirements(repo):
593 """Ensure that no existing requirements prevent the repository upgrade"""
602 """Ensure that no existing requirements prevent the repository upgrade"""
594
603
595 required = requiredsourcerequirements(repo)
604 required = requiredsourcerequirements(repo)
596 missingreqs = required - repo.requirements
605 missingreqs = required - repo.requirements
597 if missingreqs:
606 if missingreqs:
598 msg = _(b'cannot upgrade repository; requirement missing: %s')
607 msg = _(b'cannot upgrade repository; requirement missing: %s')
599 missingreqs = b', '.join(sorted(missingreqs))
608 missingreqs = b', '.join(sorted(missingreqs))
600 raise error.Abort(msg % missingreqs)
609 raise error.Abort(msg % missingreqs)
601
610
602 blocking = blocksourcerequirements(repo)
611 blocking = blocksourcerequirements(repo)
603 blockingreqs = blocking & repo.requirements
612 blockingreqs = blocking & repo.requirements
604 if blockingreqs:
613 if blockingreqs:
605 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
614 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
606 blockingreqs = b', '.join(sorted(blockingreqs))
615 blockingreqs = b', '.join(sorted(blockingreqs))
607 raise error.Abort(m % blockingreqs)
616 raise error.Abort(m % blockingreqs)
608
617
609
618
610 ### Verify the validity of the planned requirement changes ####################
619 ### Verify the validity of the planned requirement changes ####################
611
620
612
621
613 def supportremovedrequirements(repo):
622 def supportremovedrequirements(repo):
614 """Obtain requirements that can be removed during an upgrade.
623 """Obtain requirements that can be removed during an upgrade.
615
624
616 If an upgrade were to create a repository that dropped a requirement,
625 If an upgrade were to create a repository that dropped a requirement,
617 the dropped requirement must appear in the returned set for the upgrade
626 the dropped requirement must appear in the returned set for the upgrade
618 to be allowed.
627 to be allowed.
619 """
628 """
620 supported = {
629 supported = {
621 requirements.SPARSEREVLOG_REQUIREMENT,
630 requirements.SPARSEREVLOG_REQUIREMENT,
622 requirements.SIDEDATA_REQUIREMENT,
631 requirements.SIDEDATA_REQUIREMENT,
623 requirements.COPIESSDC_REQUIREMENT,
632 requirements.COPIESSDC_REQUIREMENT,
624 requirements.NODEMAP_REQUIREMENT,
633 requirements.NODEMAP_REQUIREMENT,
625 requirements.SHARESAFE_REQUIREMENT,
634 requirements.SHARESAFE_REQUIREMENT,
626 }
635 }
627 for name in compression.compengines:
636 for name in compression.compengines:
628 engine = compression.compengines[name]
637 engine = compression.compengines[name]
629 if engine.available() and engine.revlogheader():
638 if engine.available() and engine.revlogheader():
630 supported.add(b'exp-compression-%s' % name)
639 supported.add(b'exp-compression-%s' % name)
631 if engine.name() == b'zstd':
640 if engine.name() == b'zstd':
632 supported.add(b'revlog-compression-zstd')
641 supported.add(b'revlog-compression-zstd')
633 return supported
642 return supported
634
643
635
644
636 def supporteddestrequirements(repo):
645 def supporteddestrequirements(repo):
637 """Obtain requirements that upgrade supports in the destination.
646 """Obtain requirements that upgrade supports in the destination.
638
647
639 If the result of the upgrade would create requirements not in this set,
648 If the result of the upgrade would create requirements not in this set,
640 the upgrade is disallowed.
649 the upgrade is disallowed.
641
650
642 Extensions should monkeypatch this to add their custom requirements.
651 Extensions should monkeypatch this to add their custom requirements.
643 """
652 """
644 supported = {
653 supported = {
645 b'dotencode',
654 b'dotencode',
646 b'fncache',
655 b'fncache',
647 b'generaldelta',
656 b'generaldelta',
648 b'revlogv1',
657 b'revlogv1',
649 b'store',
658 b'store',
650 requirements.SPARSEREVLOG_REQUIREMENT,
659 requirements.SPARSEREVLOG_REQUIREMENT,
651 requirements.SIDEDATA_REQUIREMENT,
660 requirements.SIDEDATA_REQUIREMENT,
652 requirements.COPIESSDC_REQUIREMENT,
661 requirements.COPIESSDC_REQUIREMENT,
653 requirements.NODEMAP_REQUIREMENT,
662 requirements.NODEMAP_REQUIREMENT,
654 requirements.SHARESAFE_REQUIREMENT,
663 requirements.SHARESAFE_REQUIREMENT,
655 }
664 }
656 for name in compression.compengines:
665 for name in compression.compengines:
657 engine = compression.compengines[name]
666 engine = compression.compengines[name]
658 if engine.available() and engine.revlogheader():
667 if engine.available() and engine.revlogheader():
659 supported.add(b'exp-compression-%s' % name)
668 supported.add(b'exp-compression-%s' % name)
660 if engine.name() == b'zstd':
669 if engine.name() == b'zstd':
661 supported.add(b'revlog-compression-zstd')
670 supported.add(b'revlog-compression-zstd')
662 return supported
671 return supported
663
672
664
673
665 def allowednewrequirements(repo):
674 def allowednewrequirements(repo):
666 """Obtain requirements that can be added to a repository during upgrade.
675 """Obtain requirements that can be added to a repository during upgrade.
667
676
668 This is used to disallow proposed requirements from being added when
677 This is used to disallow proposed requirements from being added when
669 they weren't present before.
678 they weren't present before.
670
679
671 We use a list of allowed requirement additions instead of a list of known
680 We use a list of allowed requirement additions instead of a list of known
672 bad additions because the whitelist approach is safer and will prevent
681 bad additions because the whitelist approach is safer and will prevent
673 future, unknown requirements from accidentally being added.
682 future, unknown requirements from accidentally being added.
674 """
683 """
675 supported = {
684 supported = {
676 b'dotencode',
685 b'dotencode',
677 b'fncache',
686 b'fncache',
678 b'generaldelta',
687 b'generaldelta',
679 requirements.SPARSEREVLOG_REQUIREMENT,
688 requirements.SPARSEREVLOG_REQUIREMENT,
680 requirements.SIDEDATA_REQUIREMENT,
689 requirements.SIDEDATA_REQUIREMENT,
681 requirements.COPIESSDC_REQUIREMENT,
690 requirements.COPIESSDC_REQUIREMENT,
682 requirements.NODEMAP_REQUIREMENT,
691 requirements.NODEMAP_REQUIREMENT,
683 requirements.SHARESAFE_REQUIREMENT,
692 requirements.SHARESAFE_REQUIREMENT,
684 }
693 }
685 for name in compression.compengines:
694 for name in compression.compengines:
686 engine = compression.compengines[name]
695 engine = compression.compengines[name]
687 if engine.available() and engine.revlogheader():
696 if engine.available() and engine.revlogheader():
688 supported.add(b'exp-compression-%s' % name)
697 supported.add(b'exp-compression-%s' % name)
689 if engine.name() == b'zstd':
698 if engine.name() == b'zstd':
690 supported.add(b'revlog-compression-zstd')
699 supported.add(b'revlog-compression-zstd')
691 return supported
700 return supported
692
701
693
702
694 def check_requirements_changes(repo, new_reqs):
703 def check_requirements_changes(repo, new_reqs):
695 old_reqs = repo.requirements
704 old_reqs = repo.requirements
696
705
697 support_removal = supportremovedrequirements(repo)
706 support_removal = supportremovedrequirements(repo)
698 no_remove_reqs = old_reqs - new_reqs - support_removal
707 no_remove_reqs = old_reqs - new_reqs - support_removal
699 if no_remove_reqs:
708 if no_remove_reqs:
700 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
709 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
701 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
710 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
702 raise error.Abort(msg % no_remove_reqs)
711 raise error.Abort(msg % no_remove_reqs)
703
712
704 support_addition = allowednewrequirements(repo)
713 support_addition = allowednewrequirements(repo)
705 no_add_reqs = new_reqs - old_reqs - support_addition
714 no_add_reqs = new_reqs - old_reqs - support_addition
706 if no_add_reqs:
715 if no_add_reqs:
707 m = _(b'cannot upgrade repository; do not support adding requirement: ')
716 m = _(b'cannot upgrade repository; do not support adding requirement: ')
708 no_add_reqs = b', '.join(sorted(no_add_reqs))
717 no_add_reqs = b', '.join(sorted(no_add_reqs))
709 raise error.Abort(m + no_add_reqs)
718 raise error.Abort(m + no_add_reqs)
710
719
711 supported = supporteddestrequirements(repo)
720 supported = supporteddestrequirements(repo)
712 unsupported_reqs = new_reqs - supported
721 unsupported_reqs = new_reqs - supported
713 if unsupported_reqs:
722 if unsupported_reqs:
714 msg = _(
723 msg = _(
715 b'cannot upgrade repository; do not support destination '
724 b'cannot upgrade repository; do not support destination '
716 b'requirement: %s'
725 b'requirement: %s'
717 )
726 )
718 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
727 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
719 raise error.Abort(msg % unsupported_reqs)
728 raise error.Abort(msg % unsupported_reqs)
@@ -1,500 +1,500 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from ..i18n import _
12 from ..i18n import _
13 from ..pycompat import getattr
13 from ..pycompat import getattr
14 from .. import (
14 from .. import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 manifest,
18 manifest,
19 metadata,
19 metadata,
20 pycompat,
20 pycompat,
21 requirements,
21 requirements,
22 revlog,
22 revlog,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 )
26 )
27
27
28
28
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # Everything before the trailing '00manifest.i' is the tree prefix.
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i"))
    return filelog.filelog(repo.svfs, path[5:-2])
42
42
43
43
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    src_vfs = oldrl.opener
    dst_vfs = newrl.opener
    src_index = src_vfs.join(oldrl.indexfile)
    dst_index = dst_vfs.join(newrl.indexfile)
    src_data = src_vfs.join(oldrl.datafile)
    dst_data = dst_vfs.join(newrl.datafile)

    # Opening the destination index for write creates any missing
    # intermediate directories before the raw copies below.
    with dst_vfs(newrl.indexfile, b'w'):
        pass

    util.copyfile(src_index, dst_index)
    has_data = oldrl.opener.exists(oldrl.datafile)
    if has_data:
        util.copyfile(src_data, dst_data)

    # Changelog and manifest entries are not tracked by the fncache;
    # everything else must be registered (index, plus data when present).
    is_changelog = unencodedname.endswith(b'00changelog.i')
    is_manifest = unencodedname.endswith(b'00manifest.i')
    if not (is_changelog or is_manifest):
        destrepo.svfs.fncache.add(unencodedname)
        if has_data:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77
77
78
78
# Symbolic names for the revlog categories an upgrade can be restricted to.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Default selection: clone every revlog category in the repository.
UPGRADE_ALL_REVLOGS = frozenset(
    {UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS}
)
85 )
86
86
87
87
def getsidedatacompanion(srcrepo, dstrepo):
    """Return the sidedata companion callback for a clone, or None.

    The callback depends on how the sidedata-related requirements change
    between ``srcrepo`` and ``dstrepo``: dropping sidedata support strips
    sidedata flags, while adding/removing copy-tracing sidedata delegates
    to the metadata helpers.
    """
    companion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def companion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        companion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        companion = metadata.getsidedataremover(srcrepo, dstrepo)
    return companion
105
105
106
106
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
119
119
120
120
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    ``deltareuse`` is one of the ``revlog.revlog.DELTAREUSE*`` policies and
    controls how much delta recomputation happens while cloning.

    ``revlogs`` selects which revlog categories are actually cloned; any
    revlog not selected is blindly copied at the file level instead.
    """
    # Totals across all revlogs.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    # Per-category counters: f* filelogs, m* manifests, c* changelog.
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Fix: the original built this exception but never raised it,
            # silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        # Reads the enclosing scope's current ``progress`` at call time.
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # store.walk yields filelogs, then manifests, then the changelog;
        # each branch below announces the transition into a new category.
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
344
344
345
345
346 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
346 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
347 """Determine whether to copy a store file during upgrade.
347 """Determine whether to copy a store file during upgrade.
348
348
349 This function is called when migrating store files from ``srcrepo`` to
349 This function is called when migrating store files from ``srcrepo`` to
350 ``dstrepo`` as part of upgrading a repository.
350 ``dstrepo`` as part of upgrading a repository.
351
351
352 Args:
352 Args:
353 srcrepo: repo we are copying from
353 srcrepo: repo we are copying from
354 dstrepo: repo we are copying to
354 dstrepo: repo we are copying to
355 requirements: set of requirements for ``dstrepo``
355 requirements: set of requirements for ``dstrepo``
356 path: store file being examined
356 path: store file being examined
357 mode: the ``ST_MODE`` file type of ``path``
357 mode: the ``ST_MODE`` file type of ``path``
358 st: ``stat`` data structure for ``path``
358 st: ``stat`` data structure for ``path``
359
359
360 Function should return ``True`` if the file is to be copied.
360 Function should return ``True`` if the file is to be copied.
361 """
361 """
362 # Skip revlogs.
362 # Skip revlogs.
363 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
363 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
364 return False
364 return False
365 # Skip transaction related files.
365 # Skip transaction related files.
366 if path.startswith(b'undo'):
366 if path.startswith(b'undo'):
367 return False
367 return False
368 # Only copy regular files.
368 # Only copy regular files.
369 if mode != stat.S_IFREG:
369 if mode != stat.S_IFREG:
370 return False
370 return False
371 # Skip other skipped files.
371 # Skip other skipped files.
372 if path in (b'lock', b'fncache'):
372 if path in (b'lock', b'fncache'):
373 return False
373 return False
374
374
375 return True
375 return True
376
376
377
377
378 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
378 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
379 """Hook point for extensions to perform additional actions during upgrade.
379 """Hook point for extensions to perform additional actions during upgrade.
380
380
381 This function is called after revlogs and store files have been copied but
381 This function is called after revlogs and store files have been copied but
382 before the new store is swapped into the original location.
382 before the new store is swapped into the original location.
383 """
383 """
384
384
385
385
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    ``upgrade_op`` carries the planned changes: ``actions``,
    ``revlogs_to_process`` and the destination ``requirements``.

    Returns the path of the backup of the pre-upgrade store.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    # Map the requested delta-related action to a revlog clone policy.
    if b're-delta-all' in upgrade_op.actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif b're-delta-parent' in upgrade_op.actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-multibase' in upgrade_op.actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-fulladd' in upgrade_op.actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            b're-delta-multibase' in upgrade_op.actions,
            revlogs=upgrade_op.revlogs_to_process,
        )

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
            if not _filterstorefile(
                srcrepo, dstrepo, upgrade_op.requirements, p, kind, st
            ):
                continue

            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        # Fix: the old parameter ``requirements`` was removed from the
        # signature but this call still referenced it, which would raise
        # NameError at runtime. Use the requirements carried by the
        # upgrade operation, as done for writereporequirements() below.
        _finishdatamigration(ui, srcrepo, dstrepo, upgrade_op.requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now