Show More
@@ -1,299 +1,273 b'' | |||||
1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
1 | # upgrade.py - functions for in place upgrade of Mercurial repository | |
2 | # |
|
2 | # | |
3 | # Copyright (c) 2016-present, Gregory Szorc |
|
3 | # Copyright (c) 2016-present, Gregory Szorc | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | from .i18n import _ |
|
10 | from .i18n import _ | |
11 | from . import ( |
|
11 | from . import ( | |
12 | error, |
|
12 | error, | |
13 | hg, |
|
13 | hg, | |
14 | localrepo, |
|
14 | localrepo, | |
15 | pycompat, |
|
15 | pycompat, | |
16 | ) |
|
16 | ) | |
17 |
|
17 | |||
18 | from .upgrade_utils import ( |
|
18 | from .upgrade_utils import ( | |
19 | actions as upgrade_actions, |
|
19 | actions as upgrade_actions, | |
20 | engine as upgrade_engine, |
|
20 | engine as upgrade_engine, | |
21 | ) |
|
21 | ) | |
22 |
|
22 | |||
23 | allformatvariant = upgrade_actions.allformatvariant |
|
23 | allformatvariant = upgrade_actions.allformatvariant | |
24 |
|
24 | |||
25 | # search without '-' to support older form on newer client. |
|
25 | # search without '-' to support older form on newer client. | |
26 | # |
|
26 | # | |
27 | # We don't enforce backward compatibility for debug command so this |
|
27 | # We don't enforce backward compatibility for debug command so this | |
28 | # might eventually be dropped. However, having to use two different |
|
28 | # might eventually be dropped. However, having to use two different | |
29 | # forms in script when comparing result is anoying enough to add |
|
29 | # forms in script when comparing result is anoying enough to add | |
30 | # backward compatibility for a while. |
|
30 | # backward compatibility for a while. | |
31 | legacy_opts_map = { |
|
31 | legacy_opts_map = { | |
32 | b'redeltaparent': b're-delta-parent', |
|
32 | b'redeltaparent': b're-delta-parent', | |
33 | b'redeltamultibase': b're-delta-multibase', |
|
33 | b'redeltamultibase': b're-delta-multibase', | |
34 | b'redeltaall': b're-delta-all', |
|
34 | b'redeltaall': b're-delta-all', | |
35 | b'redeltafulladd': b're-delta-fulladd', |
|
35 | b'redeltafulladd': b're-delta-fulladd', | |
36 | } |
|
36 | } | |
37 |
|
37 | |||
38 |
|
38 | |||
39 | def upgraderepo( |
|
39 | def upgraderepo( | |
40 | ui, |
|
40 | ui, | |
41 | repo, |
|
41 | repo, | |
42 | run=False, |
|
42 | run=False, | |
43 | optimize=None, |
|
43 | optimize=None, | |
44 | backup=True, |
|
44 | backup=True, | |
45 | manifest=None, |
|
45 | manifest=None, | |
46 | changelog=None, |
|
46 | changelog=None, | |
47 | filelogs=None, |
|
47 | filelogs=None, | |
48 | ): |
|
48 | ): | |
49 | """Upgrade a repository in place.""" |
|
49 | """Upgrade a repository in place.""" | |
50 | if optimize is None: |
|
50 | if optimize is None: | |
51 | optimize = [] |
|
51 | optimize = [] | |
52 | optimize = {legacy_opts_map.get(o, o) for o in optimize} |
|
52 | optimize = {legacy_opts_map.get(o, o) for o in optimize} | |
53 | repo = repo.unfiltered() |
|
53 | repo = repo.unfiltered() | |
54 |
|
54 | |||
55 | revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) |
|
55 | revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) | |
56 | specentries = ( |
|
56 | specentries = ( | |
57 | (upgrade_engine.UPGRADE_CHANGELOG, changelog), |
|
57 | (upgrade_engine.UPGRADE_CHANGELOG, changelog), | |
58 | (upgrade_engine.UPGRADE_MANIFEST, manifest), |
|
58 | (upgrade_engine.UPGRADE_MANIFEST, manifest), | |
59 | (upgrade_engine.UPGRADE_FILELOGS, filelogs), |
|
59 | (upgrade_engine.UPGRADE_FILELOGS, filelogs), | |
60 | ) |
|
60 | ) | |
61 | specified = [(y, x) for (y, x) in specentries if x is not None] |
|
61 | specified = [(y, x) for (y, x) in specentries if x is not None] | |
62 | if specified: |
|
62 | if specified: | |
63 | # we have some limitation on revlogs to be recloned |
|
63 | # we have some limitation on revlogs to be recloned | |
64 | if any(x for y, x in specified): |
|
64 | if any(x for y, x in specified): | |
65 | revlogs = set() |
|
65 | revlogs = set() | |
66 | for upgrade, enabled in specified: |
|
66 | for upgrade, enabled in specified: | |
67 | if enabled: |
|
67 | if enabled: | |
68 | revlogs.add(upgrade) |
|
68 | revlogs.add(upgrade) | |
69 | else: |
|
69 | else: | |
70 | # none are enabled |
|
70 | # none are enabled | |
71 | for upgrade, __ in specified: |
|
71 | for upgrade, __ in specified: | |
72 | revlogs.discard(upgrade) |
|
72 | revlogs.discard(upgrade) | |
73 |
|
73 | |||
74 | # Ensure the repository can be upgraded. |
|
74 | # Ensure the repository can be upgraded. | |
75 | upgrade_actions.check_source_requirements(repo) |
|
75 | upgrade_actions.check_source_requirements(repo) | |
76 |
|
76 | |||
77 | default_options = localrepo.defaultcreateopts(repo.ui) |
|
77 | default_options = localrepo.defaultcreateopts(repo.ui) | |
78 | newreqs = localrepo.newreporequirements(repo.ui, default_options) |
|
78 | newreqs = localrepo.newreporequirements(repo.ui, default_options) | |
79 | newreqs.update(upgrade_actions.preservedrequirements(repo)) |
|
79 | newreqs.update(upgrade_actions.preservedrequirements(repo)) | |
80 |
|
80 | |||
81 | upgrade_actions.check_requirements_changes(repo, newreqs) |
|
81 | upgrade_actions.check_requirements_changes(repo, newreqs) | |
82 |
|
82 | |||
83 | # Find and validate all improvements that can be made. |
|
83 | # Find and validate all improvements that can be made. | |
84 | alloptimizations = upgrade_actions.findoptimizations(repo) |
|
84 | alloptimizations = upgrade_actions.findoptimizations(repo) | |
85 |
|
85 | |||
86 | # Apply and Validate arguments. |
|
86 | # Apply and Validate arguments. | |
87 | optimizations = [] |
|
87 | optimizations = [] | |
88 | for o in alloptimizations: |
|
88 | for o in alloptimizations: | |
89 | if o.name in optimize: |
|
89 | if o.name in optimize: | |
90 | optimizations.append(o) |
|
90 | optimizations.append(o) | |
91 | optimize.discard(o.name) |
|
91 | optimize.discard(o.name) | |
92 |
|
92 | |||
93 | if optimize: # anything left is unknown |
|
93 | if optimize: # anything left is unknown | |
94 | raise error.Abort( |
|
94 | raise error.Abort( | |
95 | _(b'unknown optimization action requested: %s') |
|
95 | _(b'unknown optimization action requested: %s') | |
96 | % b', '.join(sorted(optimize)), |
|
96 | % b', '.join(sorted(optimize)), | |
97 | hint=_(b'run without arguments to see valid optimizations'), |
|
97 | hint=_(b'run without arguments to see valid optimizations'), | |
98 | ) |
|
98 | ) | |
99 |
|
99 | |||
100 | deficiencies = upgrade_actions.finddeficiencies(repo) |
|
100 | deficiencies = upgrade_actions.finddeficiencies(repo) | |
101 | actions = upgrade_actions.determineactions( |
|
101 | actions = upgrade_actions.determineactions( | |
102 | repo, deficiencies, repo.requirements, newreqs |
|
102 | repo, deficiencies, repo.requirements, newreqs | |
103 | ) |
|
103 | ) | |
104 | actions.extend( |
|
104 | actions.extend( | |
105 | o |
|
105 | o | |
106 | for o in sorted(optimizations) |
|
106 | for o in sorted(optimizations) | |
107 | # determineactions could have added optimisation |
|
107 | # determineactions could have added optimisation | |
108 | if o not in actions |
|
108 | if o not in actions | |
109 | ) |
|
109 | ) | |
110 |
|
110 | |||
111 | removedreqs = repo.requirements - newreqs |
|
111 | removedreqs = repo.requirements - newreqs | |
112 | addedreqs = newreqs - repo.requirements |
|
112 | addedreqs = newreqs - repo.requirements | |
113 |
|
113 | |||
114 | if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS: |
|
114 | if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS: | |
115 | incompatible = upgrade_actions.RECLONES_REQUIREMENTS & ( |
|
115 | incompatible = upgrade_actions.RECLONES_REQUIREMENTS & ( | |
116 | removedreqs | addedreqs |
|
116 | removedreqs | addedreqs | |
117 | ) |
|
117 | ) | |
118 | if incompatible: |
|
118 | if incompatible: | |
119 | msg = _( |
|
119 | msg = _( | |
120 | b'ignoring revlogs selection flags, format requirements ' |
|
120 | b'ignoring revlogs selection flags, format requirements ' | |
121 | b'change: %s\n' |
|
121 | b'change: %s\n' | |
122 | ) |
|
122 | ) | |
123 | ui.warn(msg % b', '.join(sorted(incompatible))) |
|
123 | ui.warn(msg % b', '.join(sorted(incompatible))) | |
124 | revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS |
|
124 | revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS | |
125 |
|
125 | |||
126 | def write_labeled(l, label): |
|
|||
127 | first = True |
|
|||
128 | for r in sorted(l): |
|
|||
129 | if not first: |
|
|||
130 | ui.write(b', ') |
|
|||
131 | ui.write(r, label=label) |
|
|||
132 | first = False |
|
|||
133 |
|
||||
134 | def printrequirements(): |
|
|||
135 | ui.write(_(b'requirements\n')) |
|
|||
136 | ui.write(_(b' preserved: ')) |
|
|||
137 | write_labeled( |
|
|||
138 | newreqs & repo.requirements, "upgrade-repo.requirement.preserved" |
|
|||
139 | ) |
|
|||
140 | ui.write((b'\n')) |
|
|||
141 | removed = repo.requirements - newreqs |
|
|||
142 | if repo.requirements - newreqs: |
|
|||
143 | ui.write(_(b' removed: ')) |
|
|||
144 | write_labeled(removed, "upgrade-repo.requirement.removed") |
|
|||
145 | ui.write((b'\n')) |
|
|||
146 | added = newreqs - repo.requirements |
|
|||
147 | if added: |
|
|||
148 | ui.write(_(b' added: ')) |
|
|||
149 | write_labeled(added, "upgrade-repo.requirement.added") |
|
|||
150 | ui.write((b'\n')) |
|
|||
151 | ui.write(b'\n') |
|
|||
152 |
|
||||
153 | upgrade_op = upgrade_actions.UpgradeOperation( |
|
126 | upgrade_op = upgrade_actions.UpgradeOperation( | |
154 | ui, |
|
127 | ui, | |
155 | newreqs, |
|
128 | newreqs, | |
|
129 | repo.requirements, | |||
156 | actions, |
|
130 | actions, | |
157 | revlogs, |
|
131 | revlogs, | |
158 | ) |
|
132 | ) | |
159 |
|
133 | |||
160 | if not run: |
|
134 | if not run: | |
161 | fromconfig = [] |
|
135 | fromconfig = [] | |
162 | onlydefault = [] |
|
136 | onlydefault = [] | |
163 |
|
137 | |||
164 | for d in deficiencies: |
|
138 | for d in deficiencies: | |
165 | if d.fromconfig(repo): |
|
139 | if d.fromconfig(repo): | |
166 | fromconfig.append(d) |
|
140 | fromconfig.append(d) | |
167 | elif d.default: |
|
141 | elif d.default: | |
168 | onlydefault.append(d) |
|
142 | onlydefault.append(d) | |
169 |
|
143 | |||
170 | if fromconfig or onlydefault: |
|
144 | if fromconfig or onlydefault: | |
171 |
|
145 | |||
172 | if fromconfig: |
|
146 | if fromconfig: | |
173 | ui.status( |
|
147 | ui.status( | |
174 | _( |
|
148 | _( | |
175 | b'repository lacks features recommended by ' |
|
149 | b'repository lacks features recommended by ' | |
176 | b'current config options:\n\n' |
|
150 | b'current config options:\n\n' | |
177 | ) |
|
151 | ) | |
178 | ) |
|
152 | ) | |
179 | for i in fromconfig: |
|
153 | for i in fromconfig: | |
180 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) |
|
154 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) | |
181 |
|
155 | |||
182 | if onlydefault: |
|
156 | if onlydefault: | |
183 | ui.status( |
|
157 | ui.status( | |
184 | _( |
|
158 | _( | |
185 | b'repository lacks features used by the default ' |
|
159 | b'repository lacks features used by the default ' | |
186 | b'config options:\n\n' |
|
160 | b'config options:\n\n' | |
187 | ) |
|
161 | ) | |
188 | ) |
|
162 | ) | |
189 | for i in onlydefault: |
|
163 | for i in onlydefault: | |
190 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) |
|
164 | ui.status(b'%s\n %s\n\n' % (i.name, i.description)) | |
191 |
|
165 | |||
192 | ui.status(b'\n') |
|
166 | ui.status(b'\n') | |
193 | else: |
|
167 | else: | |
194 | ui.status( |
|
168 | ui.status( | |
195 | _( |
|
169 | _( | |
196 | b'(no feature deficiencies found in existing ' |
|
170 | b'(no feature deficiencies found in existing ' | |
197 | b'repository)\n' |
|
171 | b'repository)\n' | |
198 | ) |
|
172 | ) | |
199 | ) |
|
173 | ) | |
200 |
|
174 | |||
201 | ui.status( |
|
175 | ui.status( | |
202 | _( |
|
176 | _( | |
203 | b'performing an upgrade with "--run" will make the following ' |
|
177 | b'performing an upgrade with "--run" will make the following ' | |
204 | b'changes:\n\n' |
|
178 | b'changes:\n\n' | |
205 | ) |
|
179 | ) | |
206 | ) |
|
180 | ) | |
207 |
|
181 | |||
208 | printrequirements() |
|
182 | upgrade_op.print_requirements() | |
209 | upgrade_op.print_optimisations() |
|
183 | upgrade_op.print_optimisations() | |
210 | upgrade_op.print_upgrade_actions() |
|
184 | upgrade_op.print_upgrade_actions() | |
211 | upgrade_op.print_affected_revlogs() |
|
185 | upgrade_op.print_affected_revlogs() | |
212 |
|
186 | |||
213 | unusedoptimize = [i for i in alloptimizations if i not in actions] |
|
187 | unusedoptimize = [i for i in alloptimizations if i not in actions] | |
214 |
|
188 | |||
215 | if unusedoptimize: |
|
189 | if unusedoptimize: | |
216 | ui.status( |
|
190 | ui.status( | |
217 | _( |
|
191 | _( | |
218 | b'additional optimizations are available by specifying ' |
|
192 | b'additional optimizations are available by specifying ' | |
219 | b'"--optimize <name>":\n\n' |
|
193 | b'"--optimize <name>":\n\n' | |
220 | ) |
|
194 | ) | |
221 | ) |
|
195 | ) | |
222 | for i in unusedoptimize: |
|
196 | for i in unusedoptimize: | |
223 | ui.status(_(b'%s\n %s\n\n') % (i.name, i.description)) |
|
197 | ui.status(_(b'%s\n %s\n\n') % (i.name, i.description)) | |
224 | return |
|
198 | return | |
225 |
|
199 | |||
226 | # Else we're in the run=true case. |
|
200 | # Else we're in the run=true case. | |
227 | ui.write(_(b'upgrade will perform the following actions:\n\n')) |
|
201 | ui.write(_(b'upgrade will perform the following actions:\n\n')) | |
228 | printrequirements() |
|
202 | upgrade_op.print_requirements() | |
229 | upgrade_op.print_optimisations() |
|
203 | upgrade_op.print_optimisations() | |
230 | upgrade_op.print_upgrade_actions() |
|
204 | upgrade_op.print_upgrade_actions() | |
231 | upgrade_op.print_affected_revlogs() |
|
205 | upgrade_op.print_affected_revlogs() | |
232 |
|
206 | |||
233 | ui.status(_(b'beginning upgrade...\n')) |
|
207 | ui.status(_(b'beginning upgrade...\n')) | |
234 | with repo.wlock(), repo.lock(): |
|
208 | with repo.wlock(), repo.lock(): | |
235 | ui.status(_(b'repository locked and read-only\n')) |
|
209 | ui.status(_(b'repository locked and read-only\n')) | |
236 | # Our strategy for upgrading the repository is to create a new, |
|
210 | # Our strategy for upgrading the repository is to create a new, | |
237 | # temporary repository, write data to it, then do a swap of the |
|
211 | # temporary repository, write data to it, then do a swap of the | |
238 | # data. There are less heavyweight ways to do this, but it is easier |
|
212 | # data. There are less heavyweight ways to do this, but it is easier | |
239 | # to create a new repo object than to instantiate all the components |
|
213 | # to create a new repo object than to instantiate all the components | |
240 | # (like the store) separately. |
|
214 | # (like the store) separately. | |
241 | tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path) |
|
215 | tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path) | |
242 | backuppath = None |
|
216 | backuppath = None | |
243 | try: |
|
217 | try: | |
244 | ui.status( |
|
218 | ui.status( | |
245 | _( |
|
219 | _( | |
246 | b'creating temporary repository to stage migrated ' |
|
220 | b'creating temporary repository to stage migrated ' | |
247 | b'data: %s\n' |
|
221 | b'data: %s\n' | |
248 | ) |
|
222 | ) | |
249 | % tmppath |
|
223 | % tmppath | |
250 | ) |
|
224 | ) | |
251 |
|
225 | |||
252 | # clone ui without using ui.copy because repo.ui is protected |
|
226 | # clone ui without using ui.copy because repo.ui is protected | |
253 | repoui = repo.ui.__class__(repo.ui) |
|
227 | repoui = repo.ui.__class__(repo.ui) | |
254 | dstrepo = hg.repository(repoui, path=tmppath, create=True) |
|
228 | dstrepo = hg.repository(repoui, path=tmppath, create=True) | |
255 |
|
229 | |||
256 | with dstrepo.wlock(), dstrepo.lock(): |
|
230 | with dstrepo.wlock(), dstrepo.lock(): | |
257 | backuppath = upgrade_engine.upgrade( |
|
231 | backuppath = upgrade_engine.upgrade( | |
258 | ui, repo, dstrepo, upgrade_op |
|
232 | ui, repo, dstrepo, upgrade_op | |
259 | ) |
|
233 | ) | |
260 | if not (backup or backuppath is None): |
|
234 | if not (backup or backuppath is None): | |
261 | ui.status( |
|
235 | ui.status( | |
262 | _(b'removing old repository content%s\n') % backuppath |
|
236 | _(b'removing old repository content%s\n') % backuppath | |
263 | ) |
|
237 | ) | |
264 | repo.vfs.rmtree(backuppath, forcibly=True) |
|
238 | repo.vfs.rmtree(backuppath, forcibly=True) | |
265 | backuppath = None |
|
239 | backuppath = None | |
266 |
|
240 | |||
267 | finally: |
|
241 | finally: | |
268 | ui.status(_(b'removing temporary repository %s\n') % tmppath) |
|
242 | ui.status(_(b'removing temporary repository %s\n') % tmppath) | |
269 | repo.vfs.rmtree(tmppath, forcibly=True) |
|
243 | repo.vfs.rmtree(tmppath, forcibly=True) | |
270 |
|
244 | |||
271 | if backuppath and not ui.quiet: |
|
245 | if backuppath and not ui.quiet: | |
272 | ui.warn( |
|
246 | ui.warn( | |
273 | _(b'copy of old repository backed up at %s\n') % backuppath |
|
247 | _(b'copy of old repository backed up at %s\n') % backuppath | |
274 | ) |
|
248 | ) | |
275 | ui.warn( |
|
249 | ui.warn( | |
276 | _( |
|
250 | _( | |
277 | b'the old repository will not be deleted; remove ' |
|
251 | b'the old repository will not be deleted; remove ' | |
278 | b'it to free up disk space once the upgraded ' |
|
252 | b'it to free up disk space once the upgraded ' | |
279 | b'repository is verified\n' |
|
253 | b'repository is verified\n' | |
280 | ) |
|
254 | ) | |
281 | ) |
|
255 | ) | |
282 |
|
256 | |||
283 | if upgrade_actions.sharesafe.name in addedreqs: |
|
257 | if upgrade_actions.sharesafe.name in addedreqs: | |
284 | ui.warn( |
|
258 | ui.warn( | |
285 | _( |
|
259 | _( | |
286 | b'repository upgraded to share safe mode, existing' |
|
260 | b'repository upgraded to share safe mode, existing' | |
287 | b' shares will still work in old non-safe mode. ' |
|
261 | b' shares will still work in old non-safe mode. ' | |
288 | b'Re-share existing shares to use them in safe mode' |
|
262 | b'Re-share existing shares to use them in safe mode' | |
289 | b' New shares will be created in safe mode.\n' |
|
263 | b' New shares will be created in safe mode.\n' | |
290 | ) |
|
264 | ) | |
291 | ) |
|
265 | ) | |
292 | if upgrade_actions.sharesafe.name in removedreqs: |
|
266 | if upgrade_actions.sharesafe.name in removedreqs: | |
293 | ui.warn( |
|
267 | ui.warn( | |
294 | _( |
|
268 | _( | |
295 | b'repository downgraded to not use share safe mode, ' |
|
269 | b'repository downgraded to not use share safe mode, ' | |
296 | b'existing shares will not work and needs to' |
|
270 | b'existing shares will not work and needs to' | |
297 | b' be reshared.\n' |
|
271 | b' be reshared.\n' | |
298 | ) |
|
272 | ) | |
299 | ) |
|
273 | ) |
@@ -1,769 +1,810 b'' | |||||
1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
1 | # upgrade.py - functions for in place upgrade of Mercurial repository | |
2 | # |
|
2 | # | |
3 | # Copyright (c) 2016-present, Gregory Szorc |
|
3 | # Copyright (c) 2016-present, Gregory Szorc | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | from ..i18n import _ |
|
10 | from ..i18n import _ | |
11 | from .. import ( |
|
11 | from .. import ( | |
12 | error, |
|
12 | error, | |
13 | localrepo, |
|
13 | localrepo, | |
14 | requirements, |
|
14 | requirements, | |
15 | util, |
|
15 | util, | |
16 | ) |
|
16 | ) | |
17 |
|
17 | |||
18 | from ..utils import compression |
|
18 | from ..utils import compression | |
19 |
|
19 | |||
20 | # list of requirements that request a clone of all revlog if added/removed |
|
20 | # list of requirements that request a clone of all revlog if added/removed | |
21 | RECLONES_REQUIREMENTS = { |
|
21 | RECLONES_REQUIREMENTS = { | |
22 | b'generaldelta', |
|
22 | b'generaldelta', | |
23 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
23 | requirements.SPARSEREVLOG_REQUIREMENT, | |
24 | } |
|
24 | } | |
25 |
|
25 | |||
26 |
|
26 | |||
27 | def preservedrequirements(repo): |
|
27 | def preservedrequirements(repo): | |
28 | return set() |
|
28 | return set() | |
29 |
|
29 | |||
30 |
|
30 | |||
31 | DEFICIENCY = b'deficiency' |
|
31 | DEFICIENCY = b'deficiency' | |
32 | OPTIMISATION = b'optimization' |
|
32 | OPTIMISATION = b'optimization' | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | class improvement(object): |
|
35 | class improvement(object): | |
36 | """Represents an improvement that can be made as part of an upgrade. |
|
36 | """Represents an improvement that can be made as part of an upgrade. | |
37 |
|
37 | |||
38 | The following attributes are defined on each instance: |
|
38 | The following attributes are defined on each instance: | |
39 |
|
39 | |||
40 | name |
|
40 | name | |
41 | Machine-readable string uniquely identifying this improvement. It |
|
41 | Machine-readable string uniquely identifying this improvement. It | |
42 | will be mapped to an action later in the upgrade process. |
|
42 | will be mapped to an action later in the upgrade process. | |
43 |
|
43 | |||
44 | type |
|
44 | type | |
45 | Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious |
|
45 | Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious | |
46 | problem. An optimization is an action (sometimes optional) that |
|
46 | problem. An optimization is an action (sometimes optional) that | |
47 | can be taken to further improve the state of the repository. |
|
47 | can be taken to further improve the state of the repository. | |
48 |
|
48 | |||
49 | description |
|
49 | description | |
50 | Message intended for humans explaining the improvement in more detail, |
|
50 | Message intended for humans explaining the improvement in more detail, | |
51 | including the implications of it. For ``DEFICIENCY`` types, should be |
|
51 | including the implications of it. For ``DEFICIENCY`` types, should be | |
52 | worded in the present tense. For ``OPTIMISATION`` types, should be |
|
52 | worded in the present tense. For ``OPTIMISATION`` types, should be | |
53 | worded in the future tense. |
|
53 | worded in the future tense. | |
54 |
|
54 | |||
55 | upgrademessage |
|
55 | upgrademessage | |
56 | Message intended for humans explaining what an upgrade addressing this |
|
56 | Message intended for humans explaining what an upgrade addressing this | |
57 | issue will do. Should be worded in the future tense. |
|
57 | issue will do. Should be worded in the future tense. | |
58 | """ |
|
58 | """ | |
59 |
|
59 | |||
60 | def __init__(self, name, type, description, upgrademessage): |
|
60 | def __init__(self, name, type, description, upgrademessage): | |
61 | self.name = name |
|
61 | self.name = name | |
62 | self.type = type |
|
62 | self.type = type | |
63 | self.description = description |
|
63 | self.description = description | |
64 | self.upgrademessage = upgrademessage |
|
64 | self.upgrademessage = upgrademessage | |
65 |
|
65 | |||
66 | def __eq__(self, other): |
|
66 | def __eq__(self, other): | |
67 | if not isinstance(other, improvement): |
|
67 | if not isinstance(other, improvement): | |
68 | # This is what python tell use to do |
|
68 | # This is what python tell use to do | |
69 | return NotImplemented |
|
69 | return NotImplemented | |
70 | return self.name == other.name |
|
70 | return self.name == other.name | |
71 |
|
71 | |||
72 | def __ne__(self, other): |
|
72 | def __ne__(self, other): | |
73 | return not (self == other) |
|
73 | return not (self == other) | |
74 |
|
74 | |||
75 | def __hash__(self): |
|
75 | def __hash__(self): | |
76 | return hash(self.name) |
|
76 | return hash(self.name) | |
77 |
|
77 | |||
78 |
|
78 | |||
79 | allformatvariant = [] |
|
79 | allformatvariant = [] | |
80 |
|
80 | |||
81 |
|
81 | |||
82 | def registerformatvariant(cls): |
|
82 | def registerformatvariant(cls): | |
83 | allformatvariant.append(cls) |
|
83 | allformatvariant.append(cls) | |
84 | return cls |
|
84 | return cls | |
85 |
|
85 | |||
86 |
|
86 | |||
87 | class formatvariant(improvement): |
|
87 | class formatvariant(improvement): | |
88 | """an improvement subclass dedicated to repository format""" |
|
88 | """an improvement subclass dedicated to repository format""" | |
89 |
|
89 | |||
90 | type = DEFICIENCY |
|
90 | type = DEFICIENCY | |
91 | ### The following attributes should be defined for each class: |
|
91 | ### The following attributes should be defined for each class: | |
92 |
|
92 | |||
93 | # machine-readable string uniquely identifying this improvement. it will be |
|
93 | # machine-readable string uniquely identifying this improvement. it will be | |
94 | # mapped to an action later in the upgrade process. |
|
94 | # mapped to an action later in the upgrade process. | |
95 | name = None |
|
95 | name = None | |
96 |
|
96 | |||
97 | # message intended for humans explaining the improvement in more detail, |
|
97 | # message intended for humans explaining the improvement in more detail, | |
98 | # including the implications of it ``DEFICIENCY`` types, should be worded |
|
98 | # including the implications of it ``DEFICIENCY`` types, should be worded | |
99 | # in the present tense. |
|
99 | # in the present tense. | |
100 | description = None |
|
100 | description = None | |
101 |
|
101 | |||
102 | # message intended for humans explaining what an upgrade addressing this |
|
102 | # message intended for humans explaining what an upgrade addressing this | |
103 | # issue will do. should be worded in the future tense. |
|
103 | # issue will do. should be worded in the future tense. | |
104 | upgrademessage = None |
|
104 | upgrademessage = None | |
105 |
|
105 | |||
106 | # value of current Mercurial default for new repository |
|
106 | # value of current Mercurial default for new repository | |
107 | default = None |
|
107 | default = None | |
108 |
|
108 | |||
109 | def __init__(self): |
|
109 | def __init__(self): | |
110 | raise NotImplementedError() |
|
110 | raise NotImplementedError() | |
111 |
|
111 | |||
112 | @staticmethod |
|
112 | @staticmethod | |
113 | def fromrepo(repo): |
|
113 | def fromrepo(repo): | |
114 | """current value of the variant in the repository""" |
|
114 | """current value of the variant in the repository""" | |
115 | raise NotImplementedError() |
|
115 | raise NotImplementedError() | |
116 |
|
116 | |||
117 | @staticmethod |
|
117 | @staticmethod | |
118 | def fromconfig(repo): |
|
118 | def fromconfig(repo): | |
119 | """current value of the variant in the configuration""" |
|
119 | """current value of the variant in the configuration""" | |
120 | raise NotImplementedError() |
|
120 | raise NotImplementedError() | |
121 |
|
121 | |||
122 |
|
122 | |||
123 | class requirementformatvariant(formatvariant): |
|
123 | class requirementformatvariant(formatvariant): | |
124 | """formatvariant based on a 'requirement' name. |
|
124 | """formatvariant based on a 'requirement' name. | |
125 |
|
125 | |||
126 | Many format variant are controlled by a 'requirement'. We define a small |
|
126 | Many format variant are controlled by a 'requirement'. We define a small | |
127 | subclass to factor the code. |
|
127 | subclass to factor the code. | |
128 | """ |
|
128 | """ | |
129 |
|
129 | |||
130 | # the requirement that control this format variant |
|
130 | # the requirement that control this format variant | |
131 | _requirement = None |
|
131 | _requirement = None | |
132 |
|
132 | |||
133 | @staticmethod |
|
133 | @staticmethod | |
134 | def _newreporequirements(ui): |
|
134 | def _newreporequirements(ui): | |
135 | return localrepo.newreporequirements( |
|
135 | return localrepo.newreporequirements( | |
136 | ui, localrepo.defaultcreateopts(ui) |
|
136 | ui, localrepo.defaultcreateopts(ui) | |
137 | ) |
|
137 | ) | |
138 |
|
138 | |||
139 | @classmethod |
|
139 | @classmethod | |
140 | def fromrepo(cls, repo): |
|
140 | def fromrepo(cls, repo): | |
141 | assert cls._requirement is not None |
|
141 | assert cls._requirement is not None | |
142 | return cls._requirement in repo.requirements |
|
142 | return cls._requirement in repo.requirements | |
143 |
|
143 | |||
144 | @classmethod |
|
144 | @classmethod | |
145 | def fromconfig(cls, repo): |
|
145 | def fromconfig(cls, repo): | |
146 | assert cls._requirement is not None |
|
146 | assert cls._requirement is not None | |
147 | return cls._requirement in cls._newreporequirements(repo.ui) |
|
147 | return cls._requirement in cls._newreporequirements(repo.ui) | |
148 |
|
148 | |||
149 |
|
149 | |||
150 | @registerformatvariant |
|
150 | @registerformatvariant | |
151 | class fncache(requirementformatvariant): |
|
151 | class fncache(requirementformatvariant): | |
152 | name = b'fncache' |
|
152 | name = b'fncache' | |
153 |
|
153 | |||
154 | _requirement = b'fncache' |
|
154 | _requirement = b'fncache' | |
155 |
|
155 | |||
156 | default = True |
|
156 | default = True | |
157 |
|
157 | |||
158 | description = _( |
|
158 | description = _( | |
159 | b'long and reserved filenames may not work correctly; ' |
|
159 | b'long and reserved filenames may not work correctly; ' | |
160 | b'repository performance is sub-optimal' |
|
160 | b'repository performance is sub-optimal' | |
161 | ) |
|
161 | ) | |
162 |
|
162 | |||
163 | upgrademessage = _( |
|
163 | upgrademessage = _( | |
164 | b'repository will be more resilient to storing ' |
|
164 | b'repository will be more resilient to storing ' | |
165 | b'certain paths and performance of certain ' |
|
165 | b'certain paths and performance of certain ' | |
166 | b'operations should be improved' |
|
166 | b'operations should be improved' | |
167 | ) |
|
167 | ) | |
168 |
|
168 | |||
169 |
|
169 | |||
170 | @registerformatvariant |
|
170 | @registerformatvariant | |
171 | class dotencode(requirementformatvariant): |
|
171 | class dotencode(requirementformatvariant): | |
172 | name = b'dotencode' |
|
172 | name = b'dotencode' | |
173 |
|
173 | |||
174 | _requirement = b'dotencode' |
|
174 | _requirement = b'dotencode' | |
175 |
|
175 | |||
176 | default = True |
|
176 | default = True | |
177 |
|
177 | |||
178 | description = _( |
|
178 | description = _( | |
179 | b'storage of filenames beginning with a period or ' |
|
179 | b'storage of filenames beginning with a period or ' | |
180 | b'space may not work correctly' |
|
180 | b'space may not work correctly' | |
181 | ) |
|
181 | ) | |
182 |
|
182 | |||
183 | upgrademessage = _( |
|
183 | upgrademessage = _( | |
184 | b'repository will be better able to store files ' |
|
184 | b'repository will be better able to store files ' | |
185 | b'beginning with a space or period' |
|
185 | b'beginning with a space or period' | |
186 | ) |
|
186 | ) | |
187 |
|
187 | |||
188 |
|
188 | |||
189 | @registerformatvariant |
|
189 | @registerformatvariant | |
190 | class generaldelta(requirementformatvariant): |
|
190 | class generaldelta(requirementformatvariant): | |
191 | name = b'generaldelta' |
|
191 | name = b'generaldelta' | |
192 |
|
192 | |||
193 | _requirement = b'generaldelta' |
|
193 | _requirement = b'generaldelta' | |
194 |
|
194 | |||
195 | default = True |
|
195 | default = True | |
196 |
|
196 | |||
197 | description = _( |
|
197 | description = _( | |
198 | b'deltas within internal storage are unable to ' |
|
198 | b'deltas within internal storage are unable to ' | |
199 | b'choose optimal revisions; repository is larger and ' |
|
199 | b'choose optimal revisions; repository is larger and ' | |
200 | b'slower than it could be; interaction with other ' |
|
200 | b'slower than it could be; interaction with other ' | |
201 | b'repositories may require extra network and CPU ' |
|
201 | b'repositories may require extra network and CPU ' | |
202 | b'resources, making "hg push" and "hg pull" slower' |
|
202 | b'resources, making "hg push" and "hg pull" slower' | |
203 | ) |
|
203 | ) | |
204 |
|
204 | |||
205 | upgrademessage = _( |
|
205 | upgrademessage = _( | |
206 | b'repository storage will be able to create ' |
|
206 | b'repository storage will be able to create ' | |
207 | b'optimal deltas; new repository data will be ' |
|
207 | b'optimal deltas; new repository data will be ' | |
208 | b'smaller and read times should decrease; ' |
|
208 | b'smaller and read times should decrease; ' | |
209 | b'interacting with other repositories using this ' |
|
209 | b'interacting with other repositories using this ' | |
210 | b'storage model should require less network and ' |
|
210 | b'storage model should require less network and ' | |
211 | b'CPU resources, making "hg push" and "hg pull" ' |
|
211 | b'CPU resources, making "hg push" and "hg pull" ' | |
212 | b'faster' |
|
212 | b'faster' | |
213 | ) |
|
213 | ) | |
214 |
|
214 | |||
215 |
|
215 | |||
216 | @registerformatvariant |
|
216 | @registerformatvariant | |
217 | class sharesafe(requirementformatvariant): |
|
217 | class sharesafe(requirementformatvariant): | |
218 | name = b'exp-sharesafe' |
|
218 | name = b'exp-sharesafe' | |
219 | _requirement = requirements.SHARESAFE_REQUIREMENT |
|
219 | _requirement = requirements.SHARESAFE_REQUIREMENT | |
220 |
|
220 | |||
221 | default = False |
|
221 | default = False | |
222 |
|
222 | |||
223 | description = _( |
|
223 | description = _( | |
224 | b'old shared repositories do not share source repository ' |
|
224 | b'old shared repositories do not share source repository ' | |
225 | b'requirements and config. This leads to various problems ' |
|
225 | b'requirements and config. This leads to various problems ' | |
226 | b'when the source repository format is upgraded or some new ' |
|
226 | b'when the source repository format is upgraded or some new ' | |
227 | b'extensions are enabled.' |
|
227 | b'extensions are enabled.' | |
228 | ) |
|
228 | ) | |
229 |
|
229 | |||
230 | upgrademessage = _( |
|
230 | upgrademessage = _( | |
231 | b'Upgrades a repository to share-safe format so that future ' |
|
231 | b'Upgrades a repository to share-safe format so that future ' | |
232 | b'shares of this repository share its requirements and configs.' |
|
232 | b'shares of this repository share its requirements and configs.' | |
233 | ) |
|
233 | ) | |
234 |
|
234 | |||
235 |
|
235 | |||
236 | @registerformatvariant |
|
236 | @registerformatvariant | |
237 | class sparserevlog(requirementformatvariant): |
|
237 | class sparserevlog(requirementformatvariant): | |
238 | name = b'sparserevlog' |
|
238 | name = b'sparserevlog' | |
239 |
|
239 | |||
240 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT |
|
240 | _requirement = requirements.SPARSEREVLOG_REQUIREMENT | |
241 |
|
241 | |||
242 | default = True |
|
242 | default = True | |
243 |
|
243 | |||
244 | description = _( |
|
244 | description = _( | |
245 | b'in order to limit disk reading and memory usage on older ' |
|
245 | b'in order to limit disk reading and memory usage on older ' | |
246 | b'version, the span of a delta chain from its root to its ' |
|
246 | b'version, the span of a delta chain from its root to its ' | |
247 | b'end is limited, whatever the relevant data in this span. ' |
|
247 | b'end is limited, whatever the relevant data in this span. ' | |
248 | b'This can severly limit Mercurial ability to build good ' |
|
248 | b'This can severly limit Mercurial ability to build good ' | |
249 | b'chain of delta resulting is much more storage space being ' |
|
249 | b'chain of delta resulting is much more storage space being ' | |
250 | b'taken and limit reusability of on disk delta during ' |
|
250 | b'taken and limit reusability of on disk delta during ' | |
251 | b'exchange.' |
|
251 | b'exchange.' | |
252 | ) |
|
252 | ) | |
253 |
|
253 | |||
254 | upgrademessage = _( |
|
254 | upgrademessage = _( | |
255 | b'Revlog supports delta chain with more unused data ' |
|
255 | b'Revlog supports delta chain with more unused data ' | |
256 | b'between payload. These gaps will be skipped at read ' |
|
256 | b'between payload. These gaps will be skipped at read ' | |
257 | b'time. This allows for better delta chains, making a ' |
|
257 | b'time. This allows for better delta chains, making a ' | |
258 | b'better compression and faster exchange with server.' |
|
258 | b'better compression and faster exchange with server.' | |
259 | ) |
|
259 | ) | |
260 |
|
260 | |||
261 |
|
261 | |||
262 | @registerformatvariant |
|
262 | @registerformatvariant | |
263 | class sidedata(requirementformatvariant): |
|
263 | class sidedata(requirementformatvariant): | |
264 | name = b'sidedata' |
|
264 | name = b'sidedata' | |
265 |
|
265 | |||
266 | _requirement = requirements.SIDEDATA_REQUIREMENT |
|
266 | _requirement = requirements.SIDEDATA_REQUIREMENT | |
267 |
|
267 | |||
268 | default = False |
|
268 | default = False | |
269 |
|
269 | |||
270 | description = _( |
|
270 | description = _( | |
271 | b'Allows storage of extra data alongside a revision, ' |
|
271 | b'Allows storage of extra data alongside a revision, ' | |
272 | b'unlocking various caching options.' |
|
272 | b'unlocking various caching options.' | |
273 | ) |
|
273 | ) | |
274 |
|
274 | |||
275 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') |
|
275 | upgrademessage = _(b'Allows storage of extra data alongside a revision.') | |
276 |
|
276 | |||
277 |
|
277 | |||
278 | @registerformatvariant |
|
278 | @registerformatvariant | |
279 | class persistentnodemap(requirementformatvariant): |
|
279 | class persistentnodemap(requirementformatvariant): | |
280 | name = b'persistent-nodemap' |
|
280 | name = b'persistent-nodemap' | |
281 |
|
281 | |||
282 | _requirement = requirements.NODEMAP_REQUIREMENT |
|
282 | _requirement = requirements.NODEMAP_REQUIREMENT | |
283 |
|
283 | |||
284 | default = False |
|
284 | default = False | |
285 |
|
285 | |||
286 | description = _( |
|
286 | description = _( | |
287 | b'persist the node -> rev mapping on disk to speedup lookup' |
|
287 | b'persist the node -> rev mapping on disk to speedup lookup' | |
288 | ) |
|
288 | ) | |
289 |
|
289 | |||
290 | upgrademessage = _(b'Speedup revision lookup by node id.') |
|
290 | upgrademessage = _(b'Speedup revision lookup by node id.') | |
291 |
|
291 | |||
292 |
|
292 | |||
293 | @registerformatvariant |
|
293 | @registerformatvariant | |
294 | class copiessdc(requirementformatvariant): |
|
294 | class copiessdc(requirementformatvariant): | |
295 | name = b'copies-sdc' |
|
295 | name = b'copies-sdc' | |
296 |
|
296 | |||
297 | _requirement = requirements.COPIESSDC_REQUIREMENT |
|
297 | _requirement = requirements.COPIESSDC_REQUIREMENT | |
298 |
|
298 | |||
299 | default = False |
|
299 | default = False | |
300 |
|
300 | |||
301 | description = _(b'Stores copies information alongside changesets.') |
|
301 | description = _(b'Stores copies information alongside changesets.') | |
302 |
|
302 | |||
303 | upgrademessage = _( |
|
303 | upgrademessage = _( | |
304 | b'Allows to use more efficient algorithm to deal with ' b'copy tracing.' |
|
304 | b'Allows to use more efficient algorithm to deal with ' b'copy tracing.' | |
305 | ) |
|
305 | ) | |
306 |
|
306 | |||
307 |
|
307 | |||
308 | @registerformatvariant |
|
308 | @registerformatvariant | |
309 | class removecldeltachain(formatvariant): |
|
309 | class removecldeltachain(formatvariant): | |
310 | name = b'plain-cl-delta' |
|
310 | name = b'plain-cl-delta' | |
311 |
|
311 | |||
312 | default = True |
|
312 | default = True | |
313 |
|
313 | |||
314 | description = _( |
|
314 | description = _( | |
315 | b'changelog storage is using deltas instead of ' |
|
315 | b'changelog storage is using deltas instead of ' | |
316 | b'raw entries; changelog reading and any ' |
|
316 | b'raw entries; changelog reading and any ' | |
317 | b'operation relying on changelog data are slower ' |
|
317 | b'operation relying on changelog data are slower ' | |
318 | b'than they could be' |
|
318 | b'than they could be' | |
319 | ) |
|
319 | ) | |
320 |
|
320 | |||
321 | upgrademessage = _( |
|
321 | upgrademessage = _( | |
322 | b'changelog storage will be reformated to ' |
|
322 | b'changelog storage will be reformated to ' | |
323 | b'store raw entries; changelog reading will be ' |
|
323 | b'store raw entries; changelog reading will be ' | |
324 | b'faster; changelog size may be reduced' |
|
324 | b'faster; changelog size may be reduced' | |
325 | ) |
|
325 | ) | |
326 |
|
326 | |||
327 | @staticmethod |
|
327 | @staticmethod | |
328 | def fromrepo(repo): |
|
328 | def fromrepo(repo): | |
329 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for |
|
329 | # Mercurial 4.0 changed changelogs to not use delta chains. Search for | |
330 | # changelogs with deltas. |
|
330 | # changelogs with deltas. | |
331 | cl = repo.changelog |
|
331 | cl = repo.changelog | |
332 | chainbase = cl.chainbase |
|
332 | chainbase = cl.chainbase | |
333 | return all(rev == chainbase(rev) for rev in cl) |
|
333 | return all(rev == chainbase(rev) for rev in cl) | |
334 |
|
334 | |||
335 | @staticmethod |
|
335 | @staticmethod | |
336 | def fromconfig(repo): |
|
336 | def fromconfig(repo): | |
337 | return True |
|
337 | return True | |
338 |
|
338 | |||
339 |
|
339 | |||
340 | @registerformatvariant |
|
340 | @registerformatvariant | |
341 | class compressionengine(formatvariant): |
|
341 | class compressionengine(formatvariant): | |
342 | name = b'compression' |
|
342 | name = b'compression' | |
343 | default = b'zlib' |
|
343 | default = b'zlib' | |
344 |
|
344 | |||
345 | description = _( |
|
345 | description = _( | |
346 | b'Compresion algorithm used to compress data. ' |
|
346 | b'Compresion algorithm used to compress data. ' | |
347 | b'Some engine are faster than other' |
|
347 | b'Some engine are faster than other' | |
348 | ) |
|
348 | ) | |
349 |
|
349 | |||
350 | upgrademessage = _( |
|
350 | upgrademessage = _( | |
351 | b'revlog content will be recompressed with the new algorithm.' |
|
351 | b'revlog content will be recompressed with the new algorithm.' | |
352 | ) |
|
352 | ) | |
353 |
|
353 | |||
354 | @classmethod |
|
354 | @classmethod | |
355 | def fromrepo(cls, repo): |
|
355 | def fromrepo(cls, repo): | |
356 | # we allow multiple compression engine requirement to co-exist because |
|
356 | # we allow multiple compression engine requirement to co-exist because | |
357 | # strickly speaking, revlog seems to support mixed compression style. |
|
357 | # strickly speaking, revlog seems to support mixed compression style. | |
358 | # |
|
358 | # | |
359 | # The compression used for new entries will be "the last one" |
|
359 | # The compression used for new entries will be "the last one" | |
360 | compression = b'zlib' |
|
360 | compression = b'zlib' | |
361 | for req in repo.requirements: |
|
361 | for req in repo.requirements: | |
362 | prefix = req.startswith |
|
362 | prefix = req.startswith | |
363 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): |
|
363 | if prefix(b'revlog-compression-') or prefix(b'exp-compression-'): | |
364 | compression = req.split(b'-', 2)[2] |
|
364 | compression = req.split(b'-', 2)[2] | |
365 | return compression |
|
365 | return compression | |
366 |
|
366 | |||
367 | @classmethod |
|
367 | @classmethod | |
368 | def fromconfig(cls, repo): |
|
368 | def fromconfig(cls, repo): | |
369 | compengines = repo.ui.configlist(b'format', b'revlog-compression') |
|
369 | compengines = repo.ui.configlist(b'format', b'revlog-compression') | |
370 | # return the first valid value as the selection code would do |
|
370 | # return the first valid value as the selection code would do | |
371 | for comp in compengines: |
|
371 | for comp in compengines: | |
372 | if comp in util.compengines: |
|
372 | if comp in util.compengines: | |
373 | return comp |
|
373 | return comp | |
374 |
|
374 | |||
375 | # no valide compression found lets display it all for clarity |
|
375 | # no valide compression found lets display it all for clarity | |
376 | return b','.join(compengines) |
|
376 | return b','.join(compengines) | |
377 |
|
377 | |||
378 |
|
378 | |||
379 | @registerformatvariant |
|
379 | @registerformatvariant | |
380 | class compressionlevel(formatvariant): |
|
380 | class compressionlevel(formatvariant): | |
381 | name = b'compression-level' |
|
381 | name = b'compression-level' | |
382 | default = b'default' |
|
382 | default = b'default' | |
383 |
|
383 | |||
384 | description = _(b'compression level') |
|
384 | description = _(b'compression level') | |
385 |
|
385 | |||
386 | upgrademessage = _(b'revlog content will be recompressed') |
|
386 | upgrademessage = _(b'revlog content will be recompressed') | |
387 |
|
387 | |||
388 | @classmethod |
|
388 | @classmethod | |
389 | def fromrepo(cls, repo): |
|
389 | def fromrepo(cls, repo): | |
390 | comp = compressionengine.fromrepo(repo) |
|
390 | comp = compressionengine.fromrepo(repo) | |
391 | level = None |
|
391 | level = None | |
392 | if comp == b'zlib': |
|
392 | if comp == b'zlib': | |
393 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
393 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
394 | elif comp == b'zstd': |
|
394 | elif comp == b'zstd': | |
395 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
395 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
396 | if level is None: |
|
396 | if level is None: | |
397 | return b'default' |
|
397 | return b'default' | |
398 | return bytes(level) |
|
398 | return bytes(level) | |
399 |
|
399 | |||
400 | @classmethod |
|
400 | @classmethod | |
401 | def fromconfig(cls, repo): |
|
401 | def fromconfig(cls, repo): | |
402 | comp = compressionengine.fromconfig(repo) |
|
402 | comp = compressionengine.fromconfig(repo) | |
403 | level = None |
|
403 | level = None | |
404 | if comp == b'zlib': |
|
404 | if comp == b'zlib': | |
405 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') |
|
405 | level = repo.ui.configint(b'storage', b'revlog.zlib.level') | |
406 | elif comp == b'zstd': |
|
406 | elif comp == b'zstd': | |
407 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') |
|
407 | level = repo.ui.configint(b'storage', b'revlog.zstd.level') | |
408 | if level is None: |
|
408 | if level is None: | |
409 | return b'default' |
|
409 | return b'default' | |
410 | return bytes(level) |
|
410 | return bytes(level) | |
411 |
|
411 | |||
412 |
|
412 | |||
413 | def finddeficiencies(repo): |
|
413 | def finddeficiencies(repo): | |
414 | """returns a list of deficiencies that the repo suffer from""" |
|
414 | """returns a list of deficiencies that the repo suffer from""" | |
415 | deficiencies = [] |
|
415 | deficiencies = [] | |
416 |
|
416 | |||
417 | # We could detect lack of revlogv1 and store here, but they were added |
|
417 | # We could detect lack of revlogv1 and store here, but they were added | |
418 | # in 0.9.2 and we don't support upgrading repos without these |
|
418 | # in 0.9.2 and we don't support upgrading repos without these | |
419 | # requirements, so let's not bother. |
|
419 | # requirements, so let's not bother. | |
420 |
|
420 | |||
421 | for fv in allformatvariant: |
|
421 | for fv in allformatvariant: | |
422 | if not fv.fromrepo(repo): |
|
422 | if not fv.fromrepo(repo): | |
423 | deficiencies.append(fv) |
|
423 | deficiencies.append(fv) | |
424 |
|
424 | |||
425 | return deficiencies |
|
425 | return deficiencies | |
426 |
|
426 | |||
427 |
|
427 | |||
428 | ALL_OPTIMISATIONS = [] |
|
428 | ALL_OPTIMISATIONS = [] | |
429 |
|
429 | |||
430 |
|
430 | |||
431 | def register_optimization(obj): |
|
431 | def register_optimization(obj): | |
432 | ALL_OPTIMISATIONS.append(obj) |
|
432 | ALL_OPTIMISATIONS.append(obj) | |
433 | return obj |
|
433 | return obj | |
434 |
|
434 | |||
435 |
|
435 | |||
436 | register_optimization( |
|
436 | register_optimization( | |
437 | improvement( |
|
437 | improvement( | |
438 | name=b're-delta-parent', |
|
438 | name=b're-delta-parent', | |
439 | type=OPTIMISATION, |
|
439 | type=OPTIMISATION, | |
440 | description=_( |
|
440 | description=_( | |
441 | b'deltas within internal storage will be recalculated to ' |
|
441 | b'deltas within internal storage will be recalculated to ' | |
442 | b'choose an optimal base revision where this was not ' |
|
442 | b'choose an optimal base revision where this was not ' | |
443 | b'already done; the size of the repository may shrink and ' |
|
443 | b'already done; the size of the repository may shrink and ' | |
444 | b'various operations may become faster; the first time ' |
|
444 | b'various operations may become faster; the first time ' | |
445 | b'this optimization is performed could slow down upgrade ' |
|
445 | b'this optimization is performed could slow down upgrade ' | |
446 | b'execution considerably; subsequent invocations should ' |
|
446 | b'execution considerably; subsequent invocations should ' | |
447 | b'not run noticeably slower' |
|
447 | b'not run noticeably slower' | |
448 | ), |
|
448 | ), | |
449 | upgrademessage=_( |
|
449 | upgrademessage=_( | |
450 | b'deltas within internal storage will choose a new ' |
|
450 | b'deltas within internal storage will choose a new ' | |
451 | b'base revision if needed' |
|
451 | b'base revision if needed' | |
452 | ), |
|
452 | ), | |
453 | ) |
|
453 | ) | |
454 | ) |
|
454 | ) | |
455 |
|
455 | |||
456 | register_optimization( |
|
456 | register_optimization( | |
457 | improvement( |
|
457 | improvement( | |
458 | name=b're-delta-multibase', |
|
458 | name=b're-delta-multibase', | |
459 | type=OPTIMISATION, |
|
459 | type=OPTIMISATION, | |
460 | description=_( |
|
460 | description=_( | |
461 | b'deltas within internal storage will be recalculated ' |
|
461 | b'deltas within internal storage will be recalculated ' | |
462 | b'against multiple base revision and the smallest ' |
|
462 | b'against multiple base revision and the smallest ' | |
463 | b'difference will be used; the size of the repository may ' |
|
463 | b'difference will be used; the size of the repository may ' | |
464 | b'shrink significantly when there are many merges; this ' |
|
464 | b'shrink significantly when there are many merges; this ' | |
465 | b'optimization will slow down execution in proportion to ' |
|
465 | b'optimization will slow down execution in proportion to ' | |
466 | b'the number of merges in the repository and the amount ' |
|
466 | b'the number of merges in the repository and the amount ' | |
467 | b'of files in the repository; this slow down should not ' |
|
467 | b'of files in the repository; this slow down should not ' | |
468 | b'be significant unless there are tens of thousands of ' |
|
468 | b'be significant unless there are tens of thousands of ' | |
469 | b'files and thousands of merges' |
|
469 | b'files and thousands of merges' | |
470 | ), |
|
470 | ), | |
471 | upgrademessage=_( |
|
471 | upgrademessage=_( | |
472 | b'deltas within internal storage will choose an ' |
|
472 | b'deltas within internal storage will choose an ' | |
473 | b'optimal delta by computing deltas against multiple ' |
|
473 | b'optimal delta by computing deltas against multiple ' | |
474 | b'parents; may slow down execution time ' |
|
474 | b'parents; may slow down execution time ' | |
475 | b'significantly' |
|
475 | b'significantly' | |
476 | ), |
|
476 | ), | |
477 | ) |
|
477 | ) | |
478 | ) |
|
478 | ) | |
479 |
|
479 | |||
480 | register_optimization( |
|
480 | register_optimization( | |
481 | improvement( |
|
481 | improvement( | |
482 | name=b're-delta-all', |
|
482 | name=b're-delta-all', | |
483 | type=OPTIMISATION, |
|
483 | type=OPTIMISATION, | |
484 | description=_( |
|
484 | description=_( | |
485 | b'deltas within internal storage will always be ' |
|
485 | b'deltas within internal storage will always be ' | |
486 | b'recalculated without reusing prior deltas; this will ' |
|
486 | b'recalculated without reusing prior deltas; this will ' | |
487 | b'likely make execution run several times slower; this ' |
|
487 | b'likely make execution run several times slower; this ' | |
488 | b'optimization is typically not needed' |
|
488 | b'optimization is typically not needed' | |
489 | ), |
|
489 | ), | |
490 | upgrademessage=_( |
|
490 | upgrademessage=_( | |
491 | b'deltas within internal storage will be fully ' |
|
491 | b'deltas within internal storage will be fully ' | |
492 | b'recomputed; this will likely drastically slow down ' |
|
492 | b'recomputed; this will likely drastically slow down ' | |
493 | b'execution time' |
|
493 | b'execution time' | |
494 | ), |
|
494 | ), | |
495 | ) |
|
495 | ) | |
496 | ) |
|
496 | ) | |
497 |
|
497 | |||
498 | register_optimization( |
|
498 | register_optimization( | |
499 | improvement( |
|
499 | improvement( | |
500 | name=b're-delta-fulladd', |
|
500 | name=b're-delta-fulladd', | |
501 | type=OPTIMISATION, |
|
501 | type=OPTIMISATION, | |
502 | description=_( |
|
502 | description=_( | |
503 | b'every revision will be re-added as if it was new ' |
|
503 | b'every revision will be re-added as if it was new ' | |
504 | b'content. It will go through the full storage ' |
|
504 | b'content. It will go through the full storage ' | |
505 | b'mechanism giving extensions a chance to process it ' |
|
505 | b'mechanism giving extensions a chance to process it ' | |
506 | b'(eg. lfs). This is similar to "re-delta-all" but even ' |
|
506 | b'(eg. lfs). This is similar to "re-delta-all" but even ' | |
507 | b'slower since more logic is involved.' |
|
507 | b'slower since more logic is involved.' | |
508 | ), |
|
508 | ), | |
509 | upgrademessage=_( |
|
509 | upgrademessage=_( | |
510 | b'each revision will be added as new content to the ' |
|
510 | b'each revision will be added as new content to the ' | |
511 | b'internal storage; this will likely drastically slow ' |
|
511 | b'internal storage; this will likely drastically slow ' | |
512 | b'down execution time, but some extensions might need ' |
|
512 | b'down execution time, but some extensions might need ' | |
513 | b'it' |
|
513 | b'it' | |
514 | ), |
|
514 | ), | |
515 | ) |
|
515 | ) | |
516 | ) |
|
516 | ) | |
517 |
|
517 | |||
518 |
|
518 | |||
519 | def findoptimizations(repo): |
|
519 | def findoptimizations(repo): | |
520 | """Determine optimisation that could be used during upgrade""" |
|
520 | """Determine optimisation that could be used during upgrade""" | |
521 | # These are unconditionally added. There is logic later that figures out |
|
521 | # These are unconditionally added. There is logic later that figures out | |
522 | # which ones to apply. |
|
522 | # which ones to apply. | |
523 | return list(ALL_OPTIMISATIONS) |
|
523 | return list(ALL_OPTIMISATIONS) | |
524 |
|
524 | |||
525 |
|
525 | |||
526 | def determineactions(repo, deficiencies, sourcereqs, destreqs): |
|
526 | def determineactions(repo, deficiencies, sourcereqs, destreqs): | |
527 | """Determine upgrade actions that will be performed. |
|
527 | """Determine upgrade actions that will be performed. | |
528 |
|
528 | |||
529 | Given a list of improvements as returned by ``finddeficiencies`` and |
|
529 | Given a list of improvements as returned by ``finddeficiencies`` and | |
530 | ``findoptimizations``, determine the list of upgrade actions that |
|
530 | ``findoptimizations``, determine the list of upgrade actions that | |
531 | will be performed. |
|
531 | will be performed. | |
532 |
|
532 | |||
533 | The role of this function is to filter improvements if needed, apply |
|
533 | The role of this function is to filter improvements if needed, apply | |
534 | recommended optimizations from the improvements list that make sense, |
|
534 | recommended optimizations from the improvements list that make sense, | |
535 | etc. |
|
535 | etc. | |
536 |
|
536 | |||
537 | Returns a list of action names. |
|
537 | Returns a list of action names. | |
538 | """ |
|
538 | """ | |
539 | newactions = [] |
|
539 | newactions = [] | |
540 |
|
540 | |||
541 | for d in deficiencies: |
|
541 | for d in deficiencies: | |
542 | name = d._requirement |
|
542 | name = d._requirement | |
543 |
|
543 | |||
544 | # If the action is a requirement that doesn't show up in the |
|
544 | # If the action is a requirement that doesn't show up in the | |
545 | # destination requirements, prune the action. |
|
545 | # destination requirements, prune the action. | |
546 | if name is not None and name not in destreqs: |
|
546 | if name is not None and name not in destreqs: | |
547 | continue |
|
547 | continue | |
548 |
|
548 | |||
549 | newactions.append(d) |
|
549 | newactions.append(d) | |
550 |
|
550 | |||
551 | # FUTURE consider adding some optimizations here for certain transitions. |
|
551 | # FUTURE consider adding some optimizations here for certain transitions. | |
552 | # e.g. adding generaldelta could schedule parent redeltas. |
|
552 | # e.g. adding generaldelta could schedule parent redeltas. | |
553 |
|
553 | |||
554 | return newactions |
|
554 | return newactions | |
555 |
|
555 | |||
556 |
|
556 | |||
557 | class UpgradeOperation(object): |
|
557 | class UpgradeOperation(object): | |
558 | """represent the work to be done during an upgrade""" |
|
558 | """represent the work to be done during an upgrade""" | |
559 |
|
559 | |||
560 | def __init__(self, ui, requirements, actions, revlogs_to_process): |
|
560 | def __init__( | |
|
561 | self, | |||
|
562 | ui, | |||
|
563 | new_requirements, | |||
|
564 | current_requirements, | |||
|
565 | actions, | |||
|
566 | revlogs_to_process, | |||
|
567 | ): | |||
561 | self.ui = ui |
|
568 | self.ui = ui | |
562 | self.requirements = requirements |
|
569 | self.new_requirements = new_requirements | |
|
570 | self.current_requirements = current_requirements | |||
563 | self.actions = actions |
|
571 | self.actions = actions | |
564 | self._actions_names = set([a.name for a in actions]) |
|
572 | self._actions_names = set([a.name for a in actions]) | |
565 | self.revlogs_to_process = revlogs_to_process |
|
573 | self.revlogs_to_process = revlogs_to_process | |
|
574 | # requirements which will be added by the operation | |||
|
575 | self._added_requirements = ( | |||
|
576 | self.new_requirements - self.current_requirements | |||
|
577 | ) | |||
|
578 | # requirements which will be removed by the operation | |||
|
579 | self._removed_requirements = ( | |||
|
580 | self.current_requirements - self.new_requirements | |||
|
581 | ) | |||
|
582 | # requirements which will be preserved by the operation | |||
|
583 | self._preserved_requirements = ( | |||
|
584 | self.current_requirements & self.new_requirements | |||
|
585 | ) | |||
566 |
|
586 | |||
567 | def _write_labeled(self, l, label): |
|
587 | def _write_labeled(self, l, label): | |
568 | """ |
|
588 | """ | |
569 | Utility function to aid writing of a list under one label |
|
589 | Utility function to aid writing of a list under one label | |
570 | """ |
|
590 | """ | |
571 | first = True |
|
591 | first = True | |
572 | for r in sorted(l): |
|
592 | for r in sorted(l): | |
573 | if not first: |
|
593 | if not first: | |
574 | self.ui.write(b', ') |
|
594 | self.ui.write(b', ') | |
575 | self.ui.write(r, label=label) |
|
595 | self.ui.write(r, label=label) | |
576 | first = False |
|
596 | first = False | |
577 |
|
597 | |||
|
598 | def print_requirements(self): | |||
|
599 | self.ui.write(_(b'requirements\n')) | |||
|
600 | self.ui.write(_(b' preserved: ')) | |||
|
601 | self._write_labeled( | |||
|
602 | self._preserved_requirements, "upgrade-repo.requirement.preserved" | |||
|
603 | ) | |||
|
604 | self.ui.write((b'\n')) | |||
|
605 | if self._removed_requirements: | |||
|
606 | self.ui.write(_(b' removed: ')) | |||
|
607 | self._write_labeled( | |||
|
608 | self._removed_requirements, "upgrade-repo.requirement.removed" | |||
|
609 | ) | |||
|
610 | self.ui.write((b'\n')) | |||
|
611 | if self._added_requirements: | |||
|
612 | self.ui.write(_(b' added: ')) | |||
|
613 | self._write_labeled( | |||
|
614 | self._added_requirements, "upgrade-repo.requirement.added" | |||
|
615 | ) | |||
|
616 | self.ui.write((b'\n')) | |||
|
617 | self.ui.write(b'\n') | |||
|
618 | ||||
578 | def print_optimisations(self): |
|
619 | def print_optimisations(self): | |
579 | optimisations = [a for a in self.actions if a.type == OPTIMISATION] |
|
620 | optimisations = [a for a in self.actions if a.type == OPTIMISATION] | |
580 | optimisations.sort(key=lambda a: a.name) |
|
621 | optimisations.sort(key=lambda a: a.name) | |
581 | if optimisations: |
|
622 | if optimisations: | |
582 | self.ui.write(_(b'optimisations: ')) |
|
623 | self.ui.write(_(b'optimisations: ')) | |
583 | self._write_labeled( |
|
624 | self._write_labeled( | |
584 | [a.name for a in optimisations], |
|
625 | [a.name for a in optimisations], | |
585 | "upgrade-repo.optimisation.performed", |
|
626 | "upgrade-repo.optimisation.performed", | |
586 | ) |
|
627 | ) | |
587 | self.ui.write(b'\n\n') |
|
628 | self.ui.write(b'\n\n') | |
588 |
|
629 | |||
589 | def print_upgrade_actions(self): |
|
630 | def print_upgrade_actions(self): | |
590 | for a in self.actions: |
|
631 | for a in self.actions: | |
591 | self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage)) |
|
632 | self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage)) | |
592 |
|
633 | |||
593 | def print_affected_revlogs(self): |
|
634 | def print_affected_revlogs(self): | |
594 | if not self.revlogs_to_process: |
|
635 | if not self.revlogs_to_process: | |
595 | self.ui.write((b'no revlogs to process\n')) |
|
636 | self.ui.write((b'no revlogs to process\n')) | |
596 | else: |
|
637 | else: | |
597 | self.ui.write((b'processed revlogs:\n')) |
|
638 | self.ui.write((b'processed revlogs:\n')) | |
598 | for r in sorted(self.revlogs_to_process): |
|
639 | for r in sorted(self.revlogs_to_process): | |
599 | self.ui.write((b' - %s\n' % r)) |
|
640 | self.ui.write((b' - %s\n' % r)) | |
600 | self.ui.write((b'\n')) |
|
641 | self.ui.write((b'\n')) | |
601 |
|
642 | |||
602 | def has_action(self, name): |
|
643 | def has_action(self, name): | |
603 | """ Check whether the upgrade operation will perform this action """ |
|
644 | """ Check whether the upgrade operation will perform this action """ | |
604 | return name in self._actions_names |
|
645 | return name in self._actions_names | |
605 |
|
646 | |||
606 |
|
647 | |||
607 | ### Code checking if a repository can got through the upgrade process at all. # |
|
648 | ### Code checking if a repository can got through the upgrade process at all. # | |
608 |
|
649 | |||
609 |
|
650 | |||
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2.
    return {b'revlogv1', b'store'}
622 |
|
663 | |||
623 |
|
664 | |||
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = set()
    # The upgrade code does not yet support these experimental features.
    # This is an artificial limitation.
    blockers.add(requirements.TREEMANIFEST_REQUIREMENT)
    # This was a precursor to generaldelta and was never enabled by default.
    # It should (hopefully) not exist in the wild.
    blockers.add(b'parentdelta')
    # Upgrade should operate on the actual store, not the shared link.
    blockers.add(requirements.SHARED_REQUIREMENT)
    return blockers
640 |
|
681 | |||
641 |
|
682 | |||
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    # Everything in requiredsourcerequirements() must be present.
    missing = requiredsourcerequirements(repo) - repo.requirements
    if missing:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missing)))

    # Nothing in blocksourcerequirements() may be present.
    blockers = blocksourcerequirements(repo) & repo.requirements
    if blockers:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(m % b', '.join(sorted(blockers)))
658 |
|
699 | |||
659 |
|
700 | |||
660 | ### Verify the validity of the planned requirement changes #################### |
|
701 | ### Verify the validity of the planned requirement changes #################### | |
661 |
|
702 | |||
662 |
|
703 | |||
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    droppable = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression engines that tag revlog chunks with their own header
    # contribute removable requirements as well.
    for name in compression.compengines:
        eng = compression.compengines[name]
        if not (eng.available() and eng.revlogheader()):
            continue
        droppable.add(b'exp-compression-%s' % name)
        if eng.name() == b'zstd':
            droppable.add(b'revlog-compression-zstd')
    return droppable
684 |
|
725 | |||
685 |
|
726 | |||
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    allowed = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression engines that tag revlog chunks with their own header are
    # valid destination requirements too.
    for name in compression.compengines:
        eng = compression.compengines[name]
        if not (eng.available() and eng.revlogheader()):
            continue
        allowed.add(b'exp-compression-%s' % name)
        if eng.name() == b'zstd':
            allowed.add(b'revlog-compression-zstd')
    return allowed
713 |
|
754 | |||
714 |
|
755 | |||
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    addable = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression engines that tag revlog chunks with their own header may
    # also be introduced by an upgrade.
    for name in compression.compengines:
        eng = compression.compengines[name]
        if not (eng.available() and eng.revlogheader()):
            continue
        addable.add(b'exp-compression-%s' % name)
        if eng.name() == b'zstd':
            addable.add(b'revlog-compression-zstd')
    return addable
742 |
|
783 | |||
743 |
|
784 | |||
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement delta implied by the upgrade.

    Aborts when a requirement would be removed, added, or present in the
    destination without being explicitly supported by the upgrade code.
    """
    old_reqs = repo.requirements

    # Requirements that would disappear must be explicitly droppable.
    dropped = old_reqs - new_reqs - supportremovedrequirements(repo)
    if dropped:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        raise error.Abort(msg % b', '.join(sorted(dropped)))

    # Requirements that would appear must be explicitly addable.
    added = new_reqs - old_reqs - allowednewrequirements(repo)
    if added:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        raise error.Abort(m + b', '.join(sorted(added)))

    # Finally, the complete destination set must be understood.
    unsupported = new_reqs - supporteddestrequirements(repo)
    if unsupported:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        raise error.Abort(msg % b', '.join(sorted(unsupported)))
@@ -1,500 +1,500 b'' | |||||
1 | # upgrade.py - functions for in place upgrade of Mercurial repository |
|
1 | # upgrade.py - functions for in place upgrade of Mercurial repository | |
2 | # |
|
2 | # | |
3 | # Copyright (c) 2016-present, Gregory Szorc |
|
3 | # Copyright (c) 2016-present, Gregory Szorc | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import stat |
|
10 | import stat | |
11 |
|
11 | |||
12 | from ..i18n import _ |
|
12 | from ..i18n import _ | |
13 | from ..pycompat import getattr |
|
13 | from ..pycompat import getattr | |
14 | from .. import ( |
|
14 | from .. import ( | |
15 | changelog, |
|
15 | changelog, | |
16 | error, |
|
16 | error, | |
17 | filelog, |
|
17 | filelog, | |
18 | manifest, |
|
18 | manifest, | |
19 | metadata, |
|
19 | metadata, | |
20 | pycompat, |
|
20 | pycompat, | |
21 | requirements, |
|
21 | requirements, | |
22 | revlog, |
|
22 | revlog, | |
23 | scmutil, |
|
23 | scmutil, | |
24 | util, |
|
24 | util, | |
25 | vfs as vfsmod, |
|
25 | vfs as vfsmod, | |
26 | ) |
|
26 | ) | |
27 |
|
27 | |||
28 |
|
28 | |||
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i")): strip the b'data/'
    # prefix and the b'.i' suffix to recover the tracked file name.
    return filelog.filelog(repo.svfs, path[5:-2])
42 |
|
42 | |||
43 |
|
43 | |||
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap any wrapper object down to the raw revlog.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    srcvfs = oldrl.opener
    dstvfs = newrl.opener
    srcindex = srcvfs.join(oldrl.indexfile)
    dstindex = dstvfs.join(newrl.indexfile)
    srcdata = srcvfs.join(oldrl.datafile)
    dstdata = dstvfs.join(newrl.datafile)

    # Opening the destination index for write creates any missing
    # intermediate directories.
    with dstvfs(newrl.indexfile, b'w'):
        pass

    util.copyfile(srcindex, dstindex)
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(srcdata, dstdata)

    # changelog and manifest live outside the fncache; anything else must be
    # registered so the store can locate it again.
    if not unencodedname.endswith((b'00changelog.i', b'00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 |
|
77 | |||
78 |
|
78 | |||
# Symbolic names for the classes of revlogs an upgrade can target.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Default selection: process every revlog class.
UPGRADE_ALL_REVLOGS = frozenset(
    (UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS)
)
86 |
|
86 | |||
87 |
|
87 | |||
def getsidedatacompanion(srcrepo, dstrepo):
    """Return a sidedata-rewriting callback for the clone, or None.

    Which companion (if any) is needed depends on how the requirement sets
    of the source and destination repositories differ.
    """
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements

    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # Destination drops sidedata support: strip sidedata where present.
        def sidedatacompanion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

        return sidedatacompanion
    if requirements.COPIESSDC_REQUIREMENT in addedreqs:
        return metadata.getsidedataadder(srcrepo, dstrepo)
    if requirements.COPIESSDC_REQUIREMENT in removedreqs:
        return metadata.getsidedataremover(srcrepo, dstrepo)
    return None
105 |
|
105 | |||
106 |
|
106 | |||
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        wanted = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        wanted = UPGRADE_MANIFEST
    else:
        wanted = UPGRADE_FILELOGS
    return wanted in revlogfilter
119 |
|
119 | |||
120 |
|
120 | |||
121 | def _clonerevlogs( |
|
121 | def _clonerevlogs( | |
122 | ui, |
|
122 | ui, | |
123 | srcrepo, |
|
123 | srcrepo, | |
124 | dstrepo, |
|
124 | dstrepo, | |
125 | tr, |
|
125 | tr, | |
126 | deltareuse, |
|
126 | deltareuse, | |
127 | forcedeltabothparents, |
|
127 | forcedeltabothparents, | |
128 | revlogs=UPGRADE_ALL_REVLOGS, |
|
128 | revlogs=UPGRADE_ALL_REVLOGS, | |
129 | ): |
|
129 | ): | |
130 | """Copy revlogs between 2 repos.""" |
|
130 | """Copy revlogs between 2 repos.""" | |
131 | revcount = 0 |
|
131 | revcount = 0 | |
132 | srcsize = 0 |
|
132 | srcsize = 0 | |
133 | srcrawsize = 0 |
|
133 | srcrawsize = 0 | |
134 | dstsize = 0 |
|
134 | dstsize = 0 | |
135 | fcount = 0 |
|
135 | fcount = 0 | |
136 | frevcount = 0 |
|
136 | frevcount = 0 | |
137 | fsrcsize = 0 |
|
137 | fsrcsize = 0 | |
138 | frawsize = 0 |
|
138 | frawsize = 0 | |
139 | fdstsize = 0 |
|
139 | fdstsize = 0 | |
140 | mcount = 0 |
|
140 | mcount = 0 | |
141 | mrevcount = 0 |
|
141 | mrevcount = 0 | |
142 | msrcsize = 0 |
|
142 | msrcsize = 0 | |
143 | mrawsize = 0 |
|
143 | mrawsize = 0 | |
144 | mdstsize = 0 |
|
144 | mdstsize = 0 | |
145 | crevcount = 0 |
|
145 | crevcount = 0 | |
146 | csrcsize = 0 |
|
146 | csrcsize = 0 | |
147 | crawsize = 0 |
|
147 | crawsize = 0 | |
148 | cdstsize = 0 |
|
148 | cdstsize = 0 | |
149 |
|
149 | |||
150 | alldatafiles = list(srcrepo.store.walk()) |
|
150 | alldatafiles = list(srcrepo.store.walk()) | |
151 |
|
151 | |||
152 | # Perform a pass to collect metadata. This validates we can open all |
|
152 | # Perform a pass to collect metadata. This validates we can open all | |
153 | # source files and allows a unified progress bar to be displayed. |
|
153 | # source files and allows a unified progress bar to be displayed. | |
154 | for unencoded, encoded, size in alldatafiles: |
|
154 | for unencoded, encoded, size in alldatafiles: | |
155 | if unencoded.endswith(b'.d'): |
|
155 | if unencoded.endswith(b'.d'): | |
156 | continue |
|
156 | continue | |
157 |
|
157 | |||
158 | rl = _revlogfrompath(srcrepo, unencoded) |
|
158 | rl = _revlogfrompath(srcrepo, unencoded) | |
159 |
|
159 | |||
160 | info = rl.storageinfo( |
|
160 | info = rl.storageinfo( | |
161 | exclusivefiles=True, |
|
161 | exclusivefiles=True, | |
162 | revisionscount=True, |
|
162 | revisionscount=True, | |
163 | trackedsize=True, |
|
163 | trackedsize=True, | |
164 | storedsize=True, |
|
164 | storedsize=True, | |
165 | ) |
|
165 | ) | |
166 |
|
166 | |||
167 | revcount += info[b'revisionscount'] or 0 |
|
167 | revcount += info[b'revisionscount'] or 0 | |
168 | datasize = info[b'storedsize'] or 0 |
|
168 | datasize = info[b'storedsize'] or 0 | |
169 | rawsize = info[b'trackedsize'] or 0 |
|
169 | rawsize = info[b'trackedsize'] or 0 | |
170 |
|
170 | |||
171 | srcsize += datasize |
|
171 | srcsize += datasize | |
172 | srcrawsize += rawsize |
|
172 | srcrawsize += rawsize | |
173 |
|
173 | |||
174 | # This is for the separate progress bars. |
|
174 | # This is for the separate progress bars. | |
175 | if isinstance(rl, changelog.changelog): |
|
175 | if isinstance(rl, changelog.changelog): | |
176 | crevcount += len(rl) |
|
176 | crevcount += len(rl) | |
177 | csrcsize += datasize |
|
177 | csrcsize += datasize | |
178 | crawsize += rawsize |
|
178 | crawsize += rawsize | |
179 | elif isinstance(rl, manifest.manifestrevlog): |
|
179 | elif isinstance(rl, manifest.manifestrevlog): | |
180 | mcount += 1 |
|
180 | mcount += 1 | |
181 | mrevcount += len(rl) |
|
181 | mrevcount += len(rl) | |
182 | msrcsize += datasize |
|
182 | msrcsize += datasize | |
183 | mrawsize += rawsize |
|
183 | mrawsize += rawsize | |
184 | elif isinstance(rl, filelog.filelog): |
|
184 | elif isinstance(rl, filelog.filelog): | |
185 | fcount += 1 |
|
185 | fcount += 1 | |
186 | frevcount += len(rl) |
|
186 | frevcount += len(rl) | |
187 | fsrcsize += datasize |
|
187 | fsrcsize += datasize | |
188 | frawsize += rawsize |
|
188 | frawsize += rawsize | |
189 | else: |
|
189 | else: | |
190 | error.ProgrammingError(b'unknown revlog type') |
|
190 | error.ProgrammingError(b'unknown revlog type') | |
191 |
|
191 | |||
192 | if not revcount: |
|
192 | if not revcount: | |
193 | return |
|
193 | return | |
194 |
|
194 | |||
195 | ui.status( |
|
195 | ui.status( | |
196 | _( |
|
196 | _( | |
197 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
197 | b'migrating %d total revisions (%d in filelogs, %d in manifests, ' | |
198 | b'%d in changelog)\n' |
|
198 | b'%d in changelog)\n' | |
199 | ) |
|
199 | ) | |
200 | % (revcount, frevcount, mrevcount, crevcount) |
|
200 | % (revcount, frevcount, mrevcount, crevcount) | |
201 | ) |
|
201 | ) | |
202 | ui.status( |
|
202 | ui.status( | |
203 | _(b'migrating %s in store; %s tracked data\n') |
|
203 | _(b'migrating %s in store; %s tracked data\n') | |
204 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) |
|
204 | % ((util.bytecount(srcsize), util.bytecount(srcrawsize))) | |
205 | ) |
|
205 | ) | |
206 |
|
206 | |||
207 | # Used to keep track of progress. |
|
207 | # Used to keep track of progress. | |
208 | progress = None |
|
208 | progress = None | |
209 |
|
209 | |||
210 | def oncopiedrevision(rl, rev, node): |
|
210 | def oncopiedrevision(rl, rev, node): | |
211 | progress.increment() |
|
211 | progress.increment() | |
212 |
|
212 | |||
213 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) |
|
213 | sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo) | |
214 |
|
214 | |||
215 | # Do the actual copying. |
|
215 | # Do the actual copying. | |
216 | # FUTURE this operation can be farmed off to worker processes. |
|
216 | # FUTURE this operation can be farmed off to worker processes. | |
217 | seen = set() |
|
217 | seen = set() | |
218 | for unencoded, encoded, size in alldatafiles: |
|
218 | for unencoded, encoded, size in alldatafiles: | |
219 | if unencoded.endswith(b'.d'): |
|
219 | if unencoded.endswith(b'.d'): | |
220 | continue |
|
220 | continue | |
221 |
|
221 | |||
222 | oldrl = _revlogfrompath(srcrepo, unencoded) |
|
222 | oldrl = _revlogfrompath(srcrepo, unencoded) | |
223 |
|
223 | |||
224 | if isinstance(oldrl, changelog.changelog) and b'c' not in seen: |
|
224 | if isinstance(oldrl, changelog.changelog) and b'c' not in seen: | |
225 | ui.status( |
|
225 | ui.status( | |
226 | _( |
|
226 | _( | |
227 | b'finished migrating %d manifest revisions across %d ' |
|
227 | b'finished migrating %d manifest revisions across %d ' | |
228 | b'manifests; change in size: %s\n' |
|
228 | b'manifests; change in size: %s\n' | |
229 | ) |
|
229 | ) | |
230 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) |
|
230 | % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)) | |
231 | ) |
|
231 | ) | |
232 |
|
232 | |||
233 | ui.status( |
|
233 | ui.status( | |
234 | _( |
|
234 | _( | |
235 | b'migrating changelog containing %d revisions ' |
|
235 | b'migrating changelog containing %d revisions ' | |
236 | b'(%s in store; %s tracked data)\n' |
|
236 | b'(%s in store; %s tracked data)\n' | |
237 | ) |
|
237 | ) | |
238 | % ( |
|
238 | % ( | |
239 | crevcount, |
|
239 | crevcount, | |
240 | util.bytecount(csrcsize), |
|
240 | util.bytecount(csrcsize), | |
241 | util.bytecount(crawsize), |
|
241 | util.bytecount(crawsize), | |
242 | ) |
|
242 | ) | |
243 | ) |
|
243 | ) | |
244 | seen.add(b'c') |
|
244 | seen.add(b'c') | |
245 | progress = srcrepo.ui.makeprogress( |
|
245 | progress = srcrepo.ui.makeprogress( | |
246 | _(b'changelog revisions'), total=crevcount |
|
246 | _(b'changelog revisions'), total=crevcount | |
247 | ) |
|
247 | ) | |
248 | elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen: |
|
248 | elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen: | |
249 | ui.status( |
|
249 | ui.status( | |
250 | _( |
|
250 | _( | |
251 | b'finished migrating %d filelog revisions across %d ' |
|
251 | b'finished migrating %d filelog revisions across %d ' | |
252 | b'filelogs; change in size: %s\n' |
|
252 | b'filelogs; change in size: %s\n' | |
253 | ) |
|
253 | ) | |
254 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) |
|
254 | % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)) | |
255 | ) |
|
255 | ) | |
256 |
|
256 | |||
257 | ui.status( |
|
257 | ui.status( | |
258 | _( |
|
258 | _( | |
259 | b'migrating %d manifests containing %d revisions ' |
|
259 | b'migrating %d manifests containing %d revisions ' | |
260 | b'(%s in store; %s tracked data)\n' |
|
260 | b'(%s in store; %s tracked data)\n' | |
261 | ) |
|
261 | ) | |
262 | % ( |
|
262 | % ( | |
263 | mcount, |
|
263 | mcount, | |
264 | mrevcount, |
|
264 | mrevcount, | |
265 | util.bytecount(msrcsize), |
|
265 | util.bytecount(msrcsize), | |
266 | util.bytecount(mrawsize), |
|
266 | util.bytecount(mrawsize), | |
267 | ) |
|
267 | ) | |
268 | ) |
|
268 | ) | |
269 | seen.add(b'm') |
|
269 | seen.add(b'm') | |
270 | if progress: |
|
270 | if progress: | |
271 | progress.complete() |
|
271 | progress.complete() | |
272 | progress = srcrepo.ui.makeprogress( |
|
272 | progress = srcrepo.ui.makeprogress( | |
273 | _(b'manifest revisions'), total=mrevcount |
|
273 | _(b'manifest revisions'), total=mrevcount | |
274 | ) |
|
274 | ) | |
275 | elif b'f' not in seen: |
|
275 | elif b'f' not in seen: | |
276 | ui.status( |
|
276 | ui.status( | |
277 | _( |
|
277 | _( | |
278 | b'migrating %d filelogs containing %d revisions ' |
|
278 | b'migrating %d filelogs containing %d revisions ' | |
279 | b'(%s in store; %s tracked data)\n' |
|
279 | b'(%s in store; %s tracked data)\n' | |
280 | ) |
|
280 | ) | |
281 | % ( |
|
281 | % ( | |
282 | fcount, |
|
282 | fcount, | |
283 | frevcount, |
|
283 | frevcount, | |
284 | util.bytecount(fsrcsize), |
|
284 | util.bytecount(fsrcsize), | |
285 | util.bytecount(frawsize), |
|
285 | util.bytecount(frawsize), | |
286 | ) |
|
286 | ) | |
287 | ) |
|
287 | ) | |
288 | seen.add(b'f') |
|
288 | seen.add(b'f') | |
289 | if progress: |
|
289 | if progress: | |
290 | progress.complete() |
|
290 | progress.complete() | |
291 | progress = srcrepo.ui.makeprogress( |
|
291 | progress = srcrepo.ui.makeprogress( | |
292 | _(b'file revisions'), total=frevcount |
|
292 | _(b'file revisions'), total=frevcount | |
293 | ) |
|
293 | ) | |
294 |
|
294 | |||
295 | if matchrevlog(revlogs, unencoded): |
|
295 | if matchrevlog(revlogs, unencoded): | |
296 | ui.note( |
|
296 | ui.note( | |
297 | _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded) |
|
297 | _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded) | |
298 | ) |
|
298 | ) | |
299 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
299 | newrl = _revlogfrompath(dstrepo, unencoded) | |
300 | oldrl.clone( |
|
300 | oldrl.clone( | |
301 | tr, |
|
301 | tr, | |
302 | newrl, |
|
302 | newrl, | |
303 | addrevisioncb=oncopiedrevision, |
|
303 | addrevisioncb=oncopiedrevision, | |
304 | deltareuse=deltareuse, |
|
304 | deltareuse=deltareuse, | |
305 | forcedeltabothparents=forcedeltabothparents, |
|
305 | forcedeltabothparents=forcedeltabothparents, | |
306 | sidedatacompanion=sidedatacompanion, |
|
306 | sidedatacompanion=sidedatacompanion, | |
307 | ) |
|
307 | ) | |
308 | else: |
|
308 | else: | |
309 | msg = _(b'blindly copying %s containing %i revisions\n') |
|
309 | msg = _(b'blindly copying %s containing %i revisions\n') | |
310 | ui.note(msg % (unencoded, len(oldrl))) |
|
310 | ui.note(msg % (unencoded, len(oldrl))) | |
311 | _copyrevlog(tr, dstrepo, oldrl, unencoded) |
|
311 | _copyrevlog(tr, dstrepo, oldrl, unencoded) | |
312 |
|
312 | |||
313 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
313 | newrl = _revlogfrompath(dstrepo, unencoded) | |
314 |
|
314 | |||
315 | info = newrl.storageinfo(storedsize=True) |
|
315 | info = newrl.storageinfo(storedsize=True) | |
316 | datasize = info[b'storedsize'] or 0 |
|
316 | datasize = info[b'storedsize'] or 0 | |
317 |
|
317 | |||
318 | dstsize += datasize |
|
318 | dstsize += datasize | |
319 |
|
319 | |||
320 | if isinstance(newrl, changelog.changelog): |
|
320 | if isinstance(newrl, changelog.changelog): | |
321 | cdstsize += datasize |
|
321 | cdstsize += datasize | |
322 | elif isinstance(newrl, manifest.manifestrevlog): |
|
322 | elif isinstance(newrl, manifest.manifestrevlog): | |
323 | mdstsize += datasize |
|
323 | mdstsize += datasize | |
324 | else: |
|
324 | else: | |
325 | fdstsize += datasize |
|
325 | fdstsize += datasize | |
326 |
|
326 | |||
327 | progress.complete() |
|
327 | progress.complete() | |
328 |
|
328 | |||
329 | ui.status( |
|
329 | ui.status( | |
330 | _( |
|
330 | _( | |
331 | b'finished migrating %d changelog revisions; change in size: ' |
|
331 | b'finished migrating %d changelog revisions; change in size: ' | |
332 | b'%s\n' |
|
332 | b'%s\n' | |
333 | ) |
|
333 | ) | |
334 | % (crevcount, util.bytecount(cdstsize - csrcsize)) |
|
334 | % (crevcount, util.bytecount(cdstsize - csrcsize)) | |
335 | ) |
|
335 | ) | |
336 |
|
336 | |||
337 | ui.status( |
|
337 | ui.status( | |
338 | _( |
|
338 | _( | |
339 | b'finished migrating %d total revisions; total change in store ' |
|
339 | b'finished migrating %d total revisions; total change in store ' | |
340 | b'size: %s\n' |
|
340 | b'size: %s\n' | |
341 | ) |
|
341 | ) | |
342 | % (revcount, util.bytecount(dstsize - srcsize)) |
|
342 | % (revcount, util.bytecount(dstsize - srcsize)) | |
343 | ) |
|
343 | ) | |
344 |
|
344 | |||
345 |
|
345 | |||
346 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): |
|
346 | def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st): | |
347 | """Determine whether to copy a store file during upgrade. |
|
347 | """Determine whether to copy a store file during upgrade. | |
348 |
|
348 | |||
349 | This function is called when migrating store files from ``srcrepo`` to |
|
349 | This function is called when migrating store files from ``srcrepo`` to | |
350 | ``dstrepo`` as part of upgrading a repository. |
|
350 | ``dstrepo`` as part of upgrading a repository. | |
351 |
|
351 | |||
352 | Args: |
|
352 | Args: | |
353 | srcrepo: repo we are copying from |
|
353 | srcrepo: repo we are copying from | |
354 | dstrepo: repo we are copying to |
|
354 | dstrepo: repo we are copying to | |
355 | requirements: set of requirements for ``dstrepo`` |
|
355 | requirements: set of requirements for ``dstrepo`` | |
356 | path: store file being examined |
|
356 | path: store file being examined | |
357 | mode: the ``ST_MODE`` file type of ``path`` |
|
357 | mode: the ``ST_MODE`` file type of ``path`` | |
358 | st: ``stat`` data structure for ``path`` |
|
358 | st: ``stat`` data structure for ``path`` | |
359 |
|
359 | |||
360 | Function should return ``True`` if the file is to be copied. |
|
360 | Function should return ``True`` if the file is to be copied. | |
361 | """ |
|
361 | """ | |
362 | # Skip revlogs. |
|
362 | # Skip revlogs. | |
363 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): |
|
363 | if path.endswith((b'.i', b'.d', b'.n', b'.nd')): | |
364 | return False |
|
364 | return False | |
365 | # Skip transaction related files. |
|
365 | # Skip transaction related files. | |
366 | if path.startswith(b'undo'): |
|
366 | if path.startswith(b'undo'): | |
367 | return False |
|
367 | return False | |
368 | # Only copy regular files. |
|
368 | # Only copy regular files. | |
369 | if mode != stat.S_IFREG: |
|
369 | if mode != stat.S_IFREG: | |
370 | return False |
|
370 | return False | |
371 | # Skip other skipped files. |
|
371 | # Skip other skipped files. | |
372 | if path in (b'lock', b'fncache'): |
|
372 | if path in (b'lock', b'fncache'): | |
373 | return False |
|
373 | return False | |
374 |
|
374 | |||
375 | return True |
|
375 | return True | |
376 |
|
376 | |||
377 |
|
377 | |||
378 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): |
|
378 | def _finishdatamigration(ui, srcrepo, dstrepo, requirements): | |
379 | """Hook point for extensions to perform additional actions during upgrade. |
|
379 | """Hook point for extensions to perform additional actions during upgrade. | |
380 |
|
380 | |||
381 | This function is called after revlogs and store files have been copied but |
|
381 | This function is called after revlogs and store files have been copied but | |
382 | before the new store is swapped into the original location. |
|
382 | before the new store is swapped into the original location. | |
383 | """ |
|
383 | """ | |
384 |
|
384 | |||
385 |
|
385 | |||
386 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): |
|
386 | def upgrade(ui, srcrepo, dstrepo, upgrade_op): | |
387 | """Do the low-level work of upgrading a repository. |
|
387 | """Do the low-level work of upgrading a repository. | |
388 |
|
388 | |||
389 | The upgrade is effectively performed as a copy between a source |
|
389 | The upgrade is effectively performed as a copy between a source | |
390 | repository and a temporary destination repository. |
|
390 | repository and a temporary destination repository. | |
391 |
|
391 | |||
392 | The source repository is unmodified for as long as possible so the |
|
392 | The source repository is unmodified for as long as possible so the | |
393 | upgrade can abort at any time without causing loss of service for |
|
393 | upgrade can abort at any time without causing loss of service for | |
394 | readers and without corrupting the source repository. |
|
394 | readers and without corrupting the source repository. | |
395 | """ |
|
395 | """ | |
396 | assert srcrepo.currentwlock() |
|
396 | assert srcrepo.currentwlock() | |
397 | assert dstrepo.currentwlock() |
|
397 | assert dstrepo.currentwlock() | |
398 |
|
398 | |||
399 | ui.status( |
|
399 | ui.status( | |
400 | _( |
|
400 | _( | |
401 | b'(it is safe to interrupt this process any time before ' |
|
401 | b'(it is safe to interrupt this process any time before ' | |
402 | b'data migration completes)\n' |
|
402 | b'data migration completes)\n' | |
403 | ) |
|
403 | ) | |
404 | ) |
|
404 | ) | |
405 |
|
405 | |||
406 | if upgrade_op.has_action(b're-delta-all'): |
|
406 | if upgrade_op.has_action(b're-delta-all'): | |
407 | deltareuse = revlog.revlog.DELTAREUSENEVER |
|
407 | deltareuse = revlog.revlog.DELTAREUSENEVER | |
408 | elif upgrade_op.has_action(b're-delta-parent'): |
|
408 | elif upgrade_op.has_action(b're-delta-parent'): | |
409 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
409 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS | |
410 | elif upgrade_op.has_action(b're-delta-multibase'): |
|
410 | elif upgrade_op.has_action(b're-delta-multibase'): | |
411 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS |
|
411 | deltareuse = revlog.revlog.DELTAREUSESAMEREVS | |
412 | elif upgrade_op.has_action(b're-delta-fulladd'): |
|
412 | elif upgrade_op.has_action(b're-delta-fulladd'): | |
413 | deltareuse = revlog.revlog.DELTAREUSEFULLADD |
|
413 | deltareuse = revlog.revlog.DELTAREUSEFULLADD | |
414 | else: |
|
414 | else: | |
415 | deltareuse = revlog.revlog.DELTAREUSEALWAYS |
|
415 | deltareuse = revlog.revlog.DELTAREUSEALWAYS | |
416 |
|
416 | |||
417 | with dstrepo.transaction(b'upgrade') as tr: |
|
417 | with dstrepo.transaction(b'upgrade') as tr: | |
418 | _clonerevlogs( |
|
418 | _clonerevlogs( | |
419 | ui, |
|
419 | ui, | |
420 | srcrepo, |
|
420 | srcrepo, | |
421 | dstrepo, |
|
421 | dstrepo, | |
422 | tr, |
|
422 | tr, | |
423 | deltareuse, |
|
423 | deltareuse, | |
424 | upgrade_op.has_action(b're-delta-multibase'), |
|
424 | upgrade_op.has_action(b're-delta-multibase'), | |
425 | revlogs=upgrade_op.revlogs_to_process, |
|
425 | revlogs=upgrade_op.revlogs_to_process, | |
426 | ) |
|
426 | ) | |
427 |
|
427 | |||
428 | # Now copy other files in the store directory. |
|
428 | # Now copy other files in the store directory. | |
429 | # The sorted() makes execution deterministic. |
|
429 | # The sorted() makes execution deterministic. | |
430 | for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): |
|
430 | for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)): | |
431 | if not _filterstorefile( |
|
431 | if not _filterstorefile( | |
432 | srcrepo, dstrepo, upgrade_op.requirements, p, kind, st |
|
432 | srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st | |
433 | ): |
|
433 | ): | |
434 | continue |
|
434 | continue | |
435 |
|
435 | |||
436 | srcrepo.ui.status(_(b'copying %s\n') % p) |
|
436 | srcrepo.ui.status(_(b'copying %s\n') % p) | |
437 | src = srcrepo.store.rawvfs.join(p) |
|
437 | src = srcrepo.store.rawvfs.join(p) | |
438 | dst = dstrepo.store.rawvfs.join(p) |
|
438 | dst = dstrepo.store.rawvfs.join(p) | |
439 | util.copyfile(src, dst, copystat=True) |
|
439 | util.copyfile(src, dst, copystat=True) | |
440 |
|
440 | |||
441 | _finishdatamigration(ui, srcrepo, dstrepo, requirements) |
|
441 | _finishdatamigration(ui, srcrepo, dstrepo, requirements) | |
442 |
|
442 | |||
443 | ui.status(_(b'data fully migrated to temporary repository\n')) |
|
443 | ui.status(_(b'data fully migrated to temporary repository\n')) | |
444 |
|
444 | |||
445 | backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path) |
|
445 | backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path) | |
446 | backupvfs = vfsmod.vfs(backuppath) |
|
446 | backupvfs = vfsmod.vfs(backuppath) | |
447 |
|
447 | |||
448 | # Make a backup of requires file first, as it is the first to be modified. |
|
448 | # Make a backup of requires file first, as it is the first to be modified. | |
449 | util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')) |
|
449 | util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')) | |
450 |
|
450 | |||
451 | # We install an arbitrary requirement that clients must not support |
|
451 | # We install an arbitrary requirement that clients must not support | |
452 | # as a mechanism to lock out new clients during the data swap. This is |
|
452 | # as a mechanism to lock out new clients during the data swap. This is | |
453 | # better than allowing a client to continue while the repository is in |
|
453 | # better than allowing a client to continue while the repository is in | |
454 | # an inconsistent state. |
|
454 | # an inconsistent state. | |
455 | ui.status( |
|
455 | ui.status( | |
456 | _( |
|
456 | _( | |
457 | b'marking source repository as being upgraded; clients will be ' |
|
457 | b'marking source repository as being upgraded; clients will be ' | |
458 | b'unable to read from repository\n' |
|
458 | b'unable to read from repository\n' | |
459 | ) |
|
459 | ) | |
460 | ) |
|
460 | ) | |
461 | scmutil.writereporequirements( |
|
461 | scmutil.writereporequirements( | |
462 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} |
|
462 | srcrepo, srcrepo.requirements | {b'upgradeinprogress'} | |
463 | ) |
|
463 | ) | |
464 |
|
464 | |||
465 | ui.status(_(b'starting in-place swap of repository data\n')) |
|
465 | ui.status(_(b'starting in-place swap of repository data\n')) | |
466 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) |
|
466 | ui.status(_(b'replaced files will be backed up at %s\n') % backuppath) | |
467 |
|
467 | |||
468 | # Now swap in the new store directory. Doing it as a rename should make |
|
468 | # Now swap in the new store directory. Doing it as a rename should make | |
469 | # the operation nearly instantaneous and atomic (at least in well-behaved |
|
469 | # the operation nearly instantaneous and atomic (at least in well-behaved | |
470 | # environments). |
|
470 | # environments). | |
471 | ui.status(_(b'replacing store...\n')) |
|
471 | ui.status(_(b'replacing store...\n')) | |
472 | tstart = util.timer() |
|
472 | tstart = util.timer() | |
473 | util.rename(srcrepo.spath, backupvfs.join(b'store')) |
|
473 | util.rename(srcrepo.spath, backupvfs.join(b'store')) | |
474 | util.rename(dstrepo.spath, srcrepo.spath) |
|
474 | util.rename(dstrepo.spath, srcrepo.spath) | |
475 | elapsed = util.timer() - tstart |
|
475 | elapsed = util.timer() - tstart | |
476 | ui.status( |
|
476 | ui.status( | |
477 | _( |
|
477 | _( | |
478 | b'store replacement complete; repository was inconsistent for ' |
|
478 | b'store replacement complete; repository was inconsistent for ' | |
479 | b'%0.1fs\n' |
|
479 | b'%0.1fs\n' | |
480 | ) |
|
480 | ) | |
481 | % elapsed |
|
481 | % elapsed | |
482 | ) |
|
482 | ) | |
483 |
|
483 | |||
484 | # We first write the requirements file. Any new requirements will lock |
|
484 | # We first write the requirements file. Any new requirements will lock | |
485 | # out legacy clients. |
|
485 | # out legacy clients. | |
486 | ui.status( |
|
486 | ui.status( | |
487 | _( |
|
487 | _( | |
488 | b'finalizing requirements file and making repository readable ' |
|
488 | b'finalizing requirements file and making repository readable ' | |
489 | b'again\n' |
|
489 | b'again\n' | |
490 | ) |
|
490 | ) | |
491 | ) |
|
491 | ) | |
492 | scmutil.writereporequirements(srcrepo, upgrade_op.requirements) |
|
492 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
493 |
|
493 | |||
494 | # The lock file from the old store won't be removed because nothing has a |
|
494 | # The lock file from the old store won't be removed because nothing has a | |
495 | # reference to its new location. So clean it up manually. Alternatively, we |
|
495 | # reference to its new location. So clean it up manually. Alternatively, we | |
496 | # could update srcrepo.svfs and other variables to point to the new |
|
496 | # could update srcrepo.svfs and other variables to point to the new | |
497 | # location. This is simpler. |
|
497 | # location. This is simpler. | |
498 | backupvfs.unlink(b'store/lock') |
|
498 | backupvfs.unlink(b'store/lock') | |
499 |
|
499 | |||
500 | return backuppath |
|
500 | return backuppath |
General Comments 0
You need to be logged in to leave comments.
Login now