##// END OF EJS Templates
upgrade: move optimisation to something more declarative...
marmoute -
r46610:32dcd783 default
parent child Browse files
Show More
@@ -1,1433 +1,1439 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
32 # list of requirements that request a clone of all revlog if added/removed
32 # list of requirements that request a clone of all revlog if added/removed
33 RECLONES_REQUIREMENTS = {
33 RECLONES_REQUIREMENTS = {
34 b'generaldelta',
34 b'generaldelta',
35 requirements.SPARSEREVLOG_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
36 }
36 }
37
37
38
38
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both of these appeared in Mercurial 0.9.2; repositories predating
    # them cannot be upgraded in place.
    return {b'revlogv1', b'store'}
51
51
52
52
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
69
69
70
70
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Requirements tied to optional compression engines can also be dropped,
    # but only for engines usable on this installation.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
91
91
92
92
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression-engine requirements are supported for every engine that is
    # both available here and usable inside revlogs.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
120
120
121
121
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Additionally allow switching to any usable revlog compression engine.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available() or not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
148
148
149
149
def preservedrequirements(repo):
    """Requirements that must be carried over unchanged (currently none)."""
    return set()
152
152
153
153
# The two categories of improvement an upgrade can perform.
DEFICIENCY = b'deficiency'
OPTIMISATION = b'optimization'


class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``DEFICIENCY`` types, should be
       worded in the present tense. For ``OPTIMISATION`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        # Equality (and hashing below) is keyed on the name alone.
        if isinstance(other, improvement):
            return self.name == other.name
        # Let Python try the reflected operation, per the data model.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)
200
200
201
201
# Every registered formatvariant subclass, in registration order.
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
208
208
209
209
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    # A missing format variant is always reported as a deficiency.
    type = DEFICIENCY

    ### Subclasses must define the following class attributes:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``DEFICIENCY`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # The classes themselves act as the variants; never instantiate.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
244
244
245
245
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would get with current config.
        createopts = localrepo.defaultcreateopts(ui)
        return localrepo.newreporequirements(ui, createopts)

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
271
271
272
272
@registerformatvariant
class fncache(requirementformatvariant):
    # Tracks filenames in a cache file so long/reserved names work.
    name = b'fncache'

    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
291
291
292
292
@registerformatvariant
class dotencode(requirementformatvariant):
    # Encodes leading '.'/' ' in store filenames for portability.
    name = b'dotencode'

    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
310
310
311
311
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Lets revlogs delta against an arbitrary revision, not just the previous.
    name = b'generaldelta'

    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
337
337
338
338
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Allows holes (unused data) inside delta chains at read time.
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    # NOTE: the translated strings below are reproduced verbatim (including
    # their original spelling) so existing translations keep matching.
    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
363
363
364
364
@registerformatvariant
class sidedata(requirementformatvariant):
    # Extra per-revision payload stored next to the revision data.
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
379
379
380
380
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # On-disk node -> rev index to avoid full changelog scans.
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
394
394
395
395
@registerformatvariant
class copiessdc(requirementformatvariant):
    # Copy-tracing metadata stored in changeset sidedata.
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with copy tracing.'
    )
409
409
410
410
@registerformatvariant
class removecldeltachain(formatvariant):
    # Not requirement-based: detected by inspecting the changelog itself.
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        # True only when every revision is its own chain base (no deltas).
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # New repositories never delta the changelog.
        return True
441
441
442
442
@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        compression = b'zlib'
        for req in repo.requirements:
            if req.startswith((b'revlog-compression-', b'exp-compression-')):
                # requirement is '<prefix>-compression-<engine name>'
                compression = req.split(b'-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for comp in compengines:
            if comp in util.compengines:
                return comp

        # no valid compression found, display them all for clarity
        return b','.join(compengines)
480
480
481
481
@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @staticmethod
    def _configuredlevel(repo, comp):
        """Return the configured level for engine ``comp``, or None.

        Only zlib and zstd expose a configurable level; other engines
        always report None (rendered as b'default').
        """
        if comp == b'zlib':
            return repo.ui.configint(b'storage', b'revlog.zlib.level')
        if comp == b'zstd':
            return repo.ui.configint(b'storage', b'revlog.zstd.level')
        return None

    @classmethod
    def fromrepo(cls, repo):
        comp = compressionengine.fromrepo(repo)
        level = cls._configuredlevel(repo, comp)
        if level is None:
            return b'default'
        # Use b'%d' to format the integer: bytes(level) on Python 3 would
        # produce a zero-filled buffer of ``level`` bytes, not the digits.
        return b'%d' % level

    @classmethod
    def fromconfig(cls, repo):
        comp = compressionengine.fromconfig(repo)
        level = cls._configuredlevel(repo, comp)
        if level is None:
            return b'default'
        return b'%d' % level
514
514
515
515
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
529
529
530
530
# Map the old dash-less optimization names to their current spellings so
# newer clients still accept the older form.
#
# Debug commands make no backward-compatibility promise, so this mapping may
# eventually be dropped.  However, having to juggle two different forms in
# scripts that compare results is annoying enough to keep it for a while.
legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}
543
543
# All known optimizations, declared up front.  Logic later in the upgrade
# process decides which ones actually apply to a given repository.
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    """Register an optimization ``improvement`` so upgrade code can offer it."""
    ALL_OPTIMISATIONS.append(obj)
    return obj


register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)
570
571
571 optimizations.append(
572 register_optimization(
572 improvement(
573 improvement(
573 name=b're-delta-multibase',
574 name=b're-delta-multibase',
574 type=OPTIMISATION,
575 type=OPTIMISATION,
575 description=_(
576 description=_(
576 b'deltas within internal storage will be recalculated '
577 b'deltas within internal storage will be recalculated '
577 b'against multiple base revision and the smallest '
578 b'against multiple base revision and the smallest '
578 b'difference will be used; the size of the repository may '
579 b'difference will be used; the size of the repository may '
579 b'shrink significantly when there are many merges; this '
580 b'shrink significantly when there are many merges; this '
580 b'optimization will slow down execution in proportion to '
581 b'optimization will slow down execution in proportion to '
581 b'the number of merges in the repository and the amount '
582 b'the number of merges in the repository and the amount '
582 b'of files in the repository; this slow down should not '
583 b'of files in the repository; this slow down should not '
583 b'be significant unless there are tens of thousands of '
584 b'be significant unless there are tens of thousands of '
584 b'files and thousands of merges'
585 b'files and thousands of merges'
585 ),
586 ),
586 upgrademessage=_(
587 upgrademessage=_(
587 b'deltas within internal storage will choose an '
588 b'deltas within internal storage will choose an '
588 b'optimal delta by computing deltas against multiple '
589 b'optimal delta by computing deltas against multiple '
589 b'parents; may slow down execution time '
590 b'parents; may slow down execution time '
590 b'significantly'
591 b'significantly'
591 ),
592 ),
592 )
593 )
593 )
594 )
594
595
595 optimizations.append(
596 register_optimization(
596 improvement(
597 improvement(
597 name=b're-delta-all',
598 name=b're-delta-all',
598 type=OPTIMISATION,
599 type=OPTIMISATION,
599 description=_(
600 description=_(
600 b'deltas within internal storage will always be '
601 b'deltas within internal storage will always be '
601 b'recalculated without reusing prior deltas; this will '
602 b'recalculated without reusing prior deltas; this will '
602 b'likely make execution run several times slower; this '
603 b'likely make execution run several times slower; this '
603 b'optimization is typically not needed'
604 b'optimization is typically not needed'
604 ),
605 ),
605 upgrademessage=_(
606 upgrademessage=_(
606 b'deltas within internal storage will be fully '
607 b'deltas within internal storage will be fully '
607 b'recomputed; this will likely drastically slow down '
608 b'recomputed; this will likely drastically slow down '
608 b'execution time'
609 b'execution time'
609 ),
610 ),
610 )
611 )
611 )
612 )
612
613
613 optimizations.append(
614 register_optimization(
614 improvement(
615 improvement(
615 name=b're-delta-fulladd',
616 name=b're-delta-fulladd',
616 type=OPTIMISATION,
617 type=OPTIMISATION,
617 description=_(
618 description=_(
618 b'every revision will be re-added as if it was new '
619 b'every revision will be re-added as if it was new '
619 b'content. It will go through the full storage '
620 b'content. It will go through the full storage '
620 b'mechanism giving extensions a chance to process it '
621 b'mechanism giving extensions a chance to process it '
621 b'(eg. lfs). This is similar to "re-delta-all" but even '
622 b'(eg. lfs). This is similar to "re-delta-all" but even '
622 b'slower since more logic is involved.'
623 b'slower since more logic is involved.'
623 ),
624 ),
624 upgrademessage=_(
625 upgrademessage=_(
625 b'each revision will be added as new content to the '
626 b'each revision will be added as new content to the '
626 b'internal storage; this will likely drastically slow '
627 b'internal storage; this will likely drastically slow '
627 b'down execution time, but some extensions might need '
628 b'down execution time, but some extensions might need '
628 b'it'
629 b'it'
629 ),
630 ),
630 )
631 )
631 )
632 )
632
633
633 return optimizations
634
635 def findoptimizations(repo):
636 """Determine optimisation that could be used during upgrade"""
637 # These are unconditionally added. There is logic later that figures out
638 # which ones to apply.
639 return list(ALL_OPTIMISATIONS)
634
640
635
641
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    # Keep a deficiency unless it is tied to a requirement that the
    # destination repository will not carry.
    newactions = [
        d
        for d in deficiencies
        if d._requirement is None or d._requirement in destreqs
    ]

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
665
671
666
672
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # Everything before the trailing "00manifest.i" is the tree prefix.
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i")): strip the "data/" prefix
    # and the ".i" suffix to recover the tracked file's name.
    return filelog.filelog(repo.svfs, path[5:-2])
680
686
681
687
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap any higher-level storage object down to the raw revlog.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newindex = newvfs.join(newrl.indexfile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    # Changelog and manifest are never tracked by the fncache; everything
    # else must be registered there so the store can find it.
    if not unencodedname.endswith((b'00changelog.i', b'00manifest.i')):
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
716
722
# Sentinel values identifying which revlog categories an upgrade targets.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOGS = object()

# Convenience filter requesting that every revlog category be processed.
UPGRADE_ALL_REVLOGS = frozenset(
    (UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS)
)
724
730
725
731
def getsidedatacompanion(srcrepo, dstrepo):
    """Build the side-data companion callback for a clone between two repos.

    Returns a callable suitable for ``revlog.clone(sidedatacompanion=...)``,
    or ``None`` when no side-data transformation is needed.
    """
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements

    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # Destination drops side-data support entirely: strip the side data
        # from any revision that carries it.
        def dropsidedata(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            dropped = bool(rl.flags(rev) & revlog.REVIDX_SIDEDATA)
            return dropped, (), {}, 0, 0

        return dropsidedata
    if requirements.COPIESSDC_REQUIREMENT in addedreqs:
        # Destination gains copy-tracing side data: compute and add it.
        return metadata.getsidedataadder(srcrepo, dstrepo)
    if requirements.COPIESSDC_REQUIREMENT in removedreqs:
        # Destination loses copy-tracing side data: remove it.
        return metadata.getsidedataremover(srcrepo, dstrepo)
    return None
743
749
744
750
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    # Classify the store entry by its filename, then test membership of the
    # matching category sentinel in the filter.
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
757
763
758
764
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    Args:
        ui: the ui object used for status/progress output
        srcrepo: repository to copy revlogs from
        dstrepo: repository to copy revlogs into
        tr: transaction on ``dstrepo`` covering the writes
        deltareuse: one of the ``revlog.revlog.DELTAREUSE*`` policies
        forcedeltabothparents: forwarded to ``revlog.clone()``
        revlogs: set of ``UPGRADE_*`` sentinels selecting which revlog
            categories are cloned (others are copied verbatim)

    Raises:
        error.ProgrammingError: when a store revlog is of an unknown type.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # BUGFIX: the exception was previously constructed but never
            # raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        # Reads the enclosing ``progress`` at call time, so it tracks
        # whichever progress bar is currently active.
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # The store walk yields filelogs, then manifests, then the
        # changelog; on each category transition, report the finished
        # category and start a fresh progress bar.
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            # This revlog needs transformation: clone it revision by
            # revision under the selected delta-reuse policy.
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            # No transformation requested: copy the files verbatim.
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
982
988
983
989
984 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
990 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
985 """Determine whether to copy a store file during upgrade.
991 """Determine whether to copy a store file during upgrade.
986
992
987 This function is called when migrating store files from ``srcrepo`` to
993 This function is called when migrating store files from ``srcrepo`` to
988 ``dstrepo`` as part of upgrading a repository.
994 ``dstrepo`` as part of upgrading a repository.
989
995
990 Args:
996 Args:
991 srcrepo: repo we are copying from
997 srcrepo: repo we are copying from
992 dstrepo: repo we are copying to
998 dstrepo: repo we are copying to
993 requirements: set of requirements for ``dstrepo``
999 requirements: set of requirements for ``dstrepo``
994 path: store file being examined
1000 path: store file being examined
995 mode: the ``ST_MODE`` file type of ``path``
1001 mode: the ``ST_MODE`` file type of ``path``
996 st: ``stat`` data structure for ``path``
1002 st: ``stat`` data structure for ``path``
997
1003
998 Function should return ``True`` if the file is to be copied.
1004 Function should return ``True`` if the file is to be copied.
999 """
1005 """
1000 # Skip revlogs.
1006 # Skip revlogs.
1001 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1007 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1002 return False
1008 return False
1003 # Skip transaction related files.
1009 # Skip transaction related files.
1004 if path.startswith(b'undo'):
1010 if path.startswith(b'undo'):
1005 return False
1011 return False
1006 # Only copy regular files.
1012 # Only copy regular files.
1007 if mode != stat.S_IFREG:
1013 if mode != stat.S_IFREG:
1008 return False
1014 return False
1009 # Skip other skipped files.
1015 # Skip other skipped files.
1010 if path in (b'lock', b'fncache'):
1016 if path in (b'lock', b'fncache'):
1011 return False
1017 return False
1012
1018
1013 return True
1019 return True
1014
1020
1015
1021
1016 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1022 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1017 """Hook point for extensions to perform additional actions during upgrade.
1023 """Hook point for extensions to perform additional actions during upgrade.
1018
1024
1019 This function is called after revlogs and store files have been copied but
1025 This function is called after revlogs and store files have been copied but
1020 before the new store is swapped into the original location.
1026 before the new store is swapped into the original location.
1021 """
1027 """
1022
1028
1023
1029
1024 def _upgraderepo(
1030 def _upgraderepo(
1025 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1031 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1026 ):
1032 ):
1027 """Do the low-level work of upgrading a repository.
1033 """Do the low-level work of upgrading a repository.
1028
1034
1029 The upgrade is effectively performed as a copy between a source
1035 The upgrade is effectively performed as a copy between a source
1030 repository and a temporary destination repository.
1036 repository and a temporary destination repository.
1031
1037
1032 The source repository is unmodified for as long as possible so the
1038 The source repository is unmodified for as long as possible so the
1033 upgrade can abort at any time without causing loss of service for
1039 upgrade can abort at any time without causing loss of service for
1034 readers and without corrupting the source repository.
1040 readers and without corrupting the source repository.
1035 """
1041 """
1036 assert srcrepo.currentwlock()
1042 assert srcrepo.currentwlock()
1037 assert dstrepo.currentwlock()
1043 assert dstrepo.currentwlock()
1038
1044
1039 ui.status(
1045 ui.status(
1040 _(
1046 _(
1041 b'(it is safe to interrupt this process any time before '
1047 b'(it is safe to interrupt this process any time before '
1042 b'data migration completes)\n'
1048 b'data migration completes)\n'
1043 )
1049 )
1044 )
1050 )
1045
1051
1046 if b're-delta-all' in actions:
1052 if b're-delta-all' in actions:
1047 deltareuse = revlog.revlog.DELTAREUSENEVER
1053 deltareuse = revlog.revlog.DELTAREUSENEVER
1048 elif b're-delta-parent' in actions:
1054 elif b're-delta-parent' in actions:
1049 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1055 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1050 elif b're-delta-multibase' in actions:
1056 elif b're-delta-multibase' in actions:
1051 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1057 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1052 elif b're-delta-fulladd' in actions:
1058 elif b're-delta-fulladd' in actions:
1053 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1059 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1054 else:
1060 else:
1055 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1061 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1056
1062
1057 with dstrepo.transaction(b'upgrade') as tr:
1063 with dstrepo.transaction(b'upgrade') as tr:
1058 _clonerevlogs(
1064 _clonerevlogs(
1059 ui,
1065 ui,
1060 srcrepo,
1066 srcrepo,
1061 dstrepo,
1067 dstrepo,
1062 tr,
1068 tr,
1063 deltareuse,
1069 deltareuse,
1064 b're-delta-multibase' in actions,
1070 b're-delta-multibase' in actions,
1065 revlogs=revlogs,
1071 revlogs=revlogs,
1066 )
1072 )
1067
1073
1068 # Now copy other files in the store directory.
1074 # Now copy other files in the store directory.
1069 # The sorted() makes execution deterministic.
1075 # The sorted() makes execution deterministic.
1070 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1076 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1071 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1077 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1072 continue
1078 continue
1073
1079
1074 srcrepo.ui.status(_(b'copying %s\n') % p)
1080 srcrepo.ui.status(_(b'copying %s\n') % p)
1075 src = srcrepo.store.rawvfs.join(p)
1081 src = srcrepo.store.rawvfs.join(p)
1076 dst = dstrepo.store.rawvfs.join(p)
1082 dst = dstrepo.store.rawvfs.join(p)
1077 util.copyfile(src, dst, copystat=True)
1083 util.copyfile(src, dst, copystat=True)
1078
1084
1079 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1085 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1080
1086
1081 ui.status(_(b'data fully migrated to temporary repository\n'))
1087 ui.status(_(b'data fully migrated to temporary repository\n'))
1082
1088
1083 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1089 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1084 backupvfs = vfsmod.vfs(backuppath)
1090 backupvfs = vfsmod.vfs(backuppath)
1085
1091
1086 # Make a backup of requires file first, as it is the first to be modified.
1092 # Make a backup of requires file first, as it is the first to be modified.
1087 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1093 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1088
1094
1089 # We install an arbitrary requirement that clients must not support
1095 # We install an arbitrary requirement that clients must not support
1090 # as a mechanism to lock out new clients during the data swap. This is
1096 # as a mechanism to lock out new clients during the data swap. This is
1091 # better than allowing a client to continue while the repository is in
1097 # better than allowing a client to continue while the repository is in
1092 # an inconsistent state.
1098 # an inconsistent state.
1093 ui.status(
1099 ui.status(
1094 _(
1100 _(
1095 b'marking source repository as being upgraded; clients will be '
1101 b'marking source repository as being upgraded; clients will be '
1096 b'unable to read from repository\n'
1102 b'unable to read from repository\n'
1097 )
1103 )
1098 )
1104 )
1099 scmutil.writereporequirements(
1105 scmutil.writereporequirements(
1100 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1106 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1101 )
1107 )
1102
1108
1103 ui.status(_(b'starting in-place swap of repository data\n'))
1109 ui.status(_(b'starting in-place swap of repository data\n'))
1104 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1110 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1105
1111
1106 # Now swap in the new store directory. Doing it as a rename should make
1112 # Now swap in the new store directory. Doing it as a rename should make
1107 # the operation nearly instantaneous and atomic (at least in well-behaved
1113 # the operation nearly instantaneous and atomic (at least in well-behaved
1108 # environments).
1114 # environments).
1109 ui.status(_(b'replacing store...\n'))
1115 ui.status(_(b'replacing store...\n'))
1110 tstart = util.timer()
1116 tstart = util.timer()
1111 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1117 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1112 util.rename(dstrepo.spath, srcrepo.spath)
1118 util.rename(dstrepo.spath, srcrepo.spath)
1113 elapsed = util.timer() - tstart
1119 elapsed = util.timer() - tstart
1114 ui.status(
1120 ui.status(
1115 _(
1121 _(
1116 b'store replacement complete; repository was inconsistent for '
1122 b'store replacement complete; repository was inconsistent for '
1117 b'%0.1fs\n'
1123 b'%0.1fs\n'
1118 )
1124 )
1119 % elapsed
1125 % elapsed
1120 )
1126 )
1121
1127
1122 # We first write the requirements file. Any new requirements will lock
1128 # We first write the requirements file. Any new requirements will lock
1123 # out legacy clients.
1129 # out legacy clients.
1124 ui.status(
1130 ui.status(
1125 _(
1131 _(
1126 b'finalizing requirements file and making repository readable '
1132 b'finalizing requirements file and making repository readable '
1127 b'again\n'
1133 b'again\n'
1128 )
1134 )
1129 )
1135 )
1130 scmutil.writereporequirements(srcrepo, requirements)
1136 scmutil.writereporequirements(srcrepo, requirements)
1131
1137
1132 # The lock file from the old store won't be removed because nothing has a
1138 # The lock file from the old store won't be removed because nothing has a
1133 # reference to its new location. So clean it up manually. Alternatively, we
1139 # reference to its new location. So clean it up manually. Alternatively, we
1134 # could update srcrepo.svfs and other variables to point to the new
1140 # could update srcrepo.svfs and other variables to point to the new
1135 # location. This is simpler.
1141 # location. This is simpler.
1136 backupvfs.unlink(b'store/lock')
1142 backupvfs.unlink(b'store/lock')
1137
1143
1138 return backuppath
1144 return backuppath
1139
1145
1140
1146
def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
):
    """Upgrade a repository in place.

    ``ui`` is the ui to report progress and warnings on; ``repo`` is the
    repository to upgrade (unfiltered below before any work is done).

    ``run`` selects between the two modes visible in this function: when
    False, only print what an upgrade would do and return; when True,
    actually perform the upgrade under the repo locks.

    ``optimize`` is an optional collection of optimization action names
    (legacy names are remapped via ``legacy_opts_map``); unknown names
    abort.  ``backup=False`` removes the old repository content instead of
    keeping it in the backup directory returned by ``_upgraderepo``.

    ``manifest`` / ``changelog`` are tri-state (None/True/False) selectors
    restricting which revlogs are recloned; both ``None`` means all revlogs.
    """
    if optimize is None:
        optimize = []
    # Accept legacy optimization names by mapping them to their modern form.
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    repo = repo.unfiltered()

    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (
        (UPGRADE_CHANGELOG, changelog),
        (UPGRADE_MANIFEST, manifest)
    )
    # Keep only the selectors the caller actually passed (tri-state: None
    # means "not specified").
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            # At least one selector is True: reclone only those explicitly
            # enabled.
            revlogs = set()
            for upgrade, enabled in specified:
                if enabled:
                    revlogs.add(upgrade)
        else:
            # none are enabled
            # All specified selectors are False: reclone everything except
            # the ones explicitly disabled.
            for upgrade, __ in specified:
                revlogs.discard(upgrade)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    # Requirements present now that the new repo would drop, and that we do
    # not know how to remove safely: refuse the upgrade.
    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    # Requirements the new repo would add that we do not know how to add.
    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            # Consume the name so anything left in ``optimize`` afterwards
            # is an unknown request.
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    if revlogs != UPGRADE_ALL_REVLOGS:
        # A requirement change that forces a full reclone overrides any
        # per-revlog selection the user made.
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        # Write the sorted items of ``l`` as a comma-separated list, each
        # item carrying the given ui label (for color/markup).
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        # Report preserved/removed/added requirement sets relative to the
        # current repository.
        # NOTE(review): the exact run of spaces inside these literals could
        # not be recovered from the rendered source; three spaces matches
        # the surrounding convention — confirm against upstream.
        ui.write(_(b'requirements\n'))
        ui.write(_(b'   preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if repo.requirements - newreqs:
            ui.write(_(b'   removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b'   added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        # List the optimisation-type actions that will be performed, if any.
        optimisations = [a for a in actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        # Name + human-readable message for every planned action.
        for a in actions:
            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies, planned actions, and unused
        # optimizations, then return without touching the repository.
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                # _upgraderepo migrates the data and swaps the stores; it
                # returns the path of the backup of the old content.
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            if not (backup or backuppath is None):
                # Caller asked for no backup: discard the old content now.
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            # Always clean up the staging repository, even on failure.
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath and not ui.quiet:
                ui.warn(
                    _(b'copy of old repository backed up at %s\n') % backuppath
                )
                ui.warn(
                    _(
                        b'the old repository will not be deleted; remove '
                        b'it to free up disk space once the upgraded '
                        b'repository is verified\n'
                    )
                )
General Comments 0
You need to be logged in to leave comments. Login now