##// END OF EJS Templates
upgrade: rename UPGRADE_FILELOG to UPGRADE_FILELOGS...
marmoute -
r46594:fe7d7917 default
parent child Browse files
Show More
@@ -1,1436 +1,1436 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
# Requirements whose addition or removal forces every revlog to be
# recloned during the upgrade.
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
37
37
38
38
def requiredsourcerequirements(repo):
    """Return the requirements the source repository must carry.

    The upgrade is refused when any requirement returned here is
    missing from the repository being upgraded.
    """
    # Both entries date back to Mercurial 0.9.2; repositories predating
    # them cannot be upgraded.
    return {b'revlogv1', b'store'}
51
51
52
52
def blocksourcerequirements(repo):
    """Return requirements whose presence blocks an upgrade.

    If the source repository carries any requirement in the returned
    set, the upgrade cannot be performed.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # Precursor to generaldelta that was never enabled by default;
        # it should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
69
69
70
70
def supportremovedrequirements(repo):
    """Return requirements an upgrade is allowed to drop.

    A requirement may only disappear from the destination repository if
    it appears in the returned set.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Compression requirements may be dropped for any engine that is
    # usable here and actually stamps revlog headers.
    for engine_name in compression.compengines:
        eng = compression.compengines[engine_name]
        if not (eng.available() and eng.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if eng.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
91
91
92
92
def supporteddestrequirements(repo):
    """Return requirements the upgrade supports in the destination.

    The upgrade is disallowed if it would produce a repository whose
    requirements are not a subset of this set.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Any usable compression engine that stamps revlog headers is a
    # valid destination requirement.
    for engine_name in compression.compengines:
        eng = compression.compengines[engine_name]
        if not (eng.available() and eng.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if eng.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
120
120
121
121
def allowednewrequirements(repo):
    """Return requirements that may be added during an upgrade.

    Used to reject proposed requirements that were not present before
    the upgrade.  A whitelist of allowed additions is used instead of a
    blacklist of known-bad ones because it is safer: future, unknown
    requirements cannot sneak in by accident.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Compression requirements may be added for any usable engine that
    # stamps revlog headers.
    for engine_name in compression.compengines:
        eng = compression.compengines[engine_name]
        if not (eng.available() and eng.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % engine_name)
        if eng.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
148
148
149
149
def preservedrequirements(repo):
    """Return requirements preserved as-is across the upgrade (none yet)."""
    return set()
152
152
153
153
# Improvement categories: a deficiency is a problem with the current
# repository format, an optimisation is an (often optional) extra step.
# NOTE: the stored value uses the American spelling on purpose.
deficiency = b'deficiency'
optimisation = b'optimization'
156
156
157
157
class improvement(object):
    """A single possible improvement applied as part of an upgrade.

    Instances expose the following attributes:

    name
      Machine-readable identifier, unique per improvement; it is mapped
      to an action later in the upgrade process.

    type
      Either ``deficiency`` or ``optimisation``.  A deficiency is an
      obvious problem; an optimization is an action (sometimes
      optional) that can further improve the repository's state.

    description
      Human-readable explanation of the improvement and its
      implications.  Worded in the present tense for ``deficiency``
      types and the future tense for ``optimisation`` types.

    upgrademessage
      Human-readable description of what an upgrade addressing this
      issue will do, worded in the future tense.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # Let the other operand handle the comparison, as the
            # Python data model prescribes.
            return NotImplemented
        # Identity is determined by the name alone.
        return self.name == other.name

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Consistent with __eq__: hash on the name only.
        return hash(self.name)
200
200
201
201
# Registry of every known format variant, populated via the decorator below.
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator registering ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
208
208
209
209
class formatvariant(improvement):
    """An improvement subclass dedicated to repository format."""

    type = deficiency

    ### Subclasses must define all of the following attributes:

    # Machine-readable identifier, unique per variant; it is mapped to
    # an action later in the upgrade process.
    name = None

    # Human-readable explanation of the variant and its implications.
    # ``deficiency`` types should be worded in the present tense.
    description = None

    # Human-readable description of what an upgrade addressing this
    # issue will do; should be worded in the future tense.
    upgrademessage = None

    # Value Mercurial currently uses by default for new repositories.
    default = None

    def __init__(self):
        # Format variants are used as classes, never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """Return the current value of the variant in the repository."""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """Return the current value of the variant in the configuration."""
        raise NotImplementedError()
244
244
245
245
class requirementformatvariant(formatvariant):
    """A formatvariant backed by a 'requirement' name.

    Many format variants are controlled by a 'requirement'; this small
    subclass factors out the shared logic.
    """

    # The requirement controlling this format variant.
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would get with the
        # current configuration.
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui)
        )

    @classmethod
    def fromrepo(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
271
271
272
272
@registerformatvariant
class fncache(requirementformatvariant):
    """Variant tracking the 'fncache' store requirement."""

    name = b'fncache'

    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
291
291
292
292
@registerformatvariant
class dotencode(requirementformatvariant):
    """Variant tracking the 'dotencode' store requirement."""

    name = b'dotencode'

    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
310
310
311
311
@registerformatvariant
class generaldelta(requirementformatvariant):
    """Variant tracking the 'generaldelta' requirement."""

    name = b'generaldelta'

    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
337
337
338
338
@registerformatvariant
class sparserevlog(requirementformatvariant):
    """Variant tracking the sparse-revlog requirement."""

    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
363
363
364
364
@registerformatvariant
class sidedata(requirementformatvariant):
    """Variant tracking the side-data requirement."""

    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
379
379
380
380
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    """Variant tracking the persistent nodemap requirement."""

    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
394
394
395
395
@registerformatvariant
class copiessdc(requirementformatvariant):
    """Variant tracking copies stored in changeset side-data."""

    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
409
409
410
410
@registerformatvariant
class removecldeltachain(formatvariant):
    """Variant detecting delta chains in the changelog."""

    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 stopped storing changelog entries as deltas.  A
        # changelog is "plain" when every revision is its own chain base.
        cl = repo.changelog
        chainbase = cl.chainbase
        return all(chainbase(rev) == rev for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # New repositories never use changelog delta chains.
        return True
441
441
442
442
@registerformatvariant
class compressionengine(formatvariant):
    """Variant reporting the compression engine used by revlogs."""

    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # Multiple compression engine requirements may coexist because,
        # strictly speaking, revlogs seem to support mixed compression
        # styles.  New entries use "the last one" found, hence the loop
        # keeps overwriting the result.
        current = b'zlib'
        for req in repo.requirements:
            if req.startswith(b'revlog-compression-') or req.startswith(
                b'exp-compression-'
            ):
                current = req.split(b'-', 2)[2]
        return current

    @classmethod
    def fromconfig(cls, repo):
        engines = repo.ui.configlist(b'format', b'revlog-compression')
        # Return the first valid value, mirroring the selection code.
        for candidate in engines:
            if candidate in util.compengines:
                return candidate

        # No valid compression found; display everything for clarity.
        return b','.join(engines)
480
480
481
481
@registerformatvariant
class compressionlevel(formatvariant):
    """Variant reporting the configured revlog compression level."""

    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @staticmethod
    def _configuredlevel(ui, comp):
        """Return the configured level for engine ``comp`` as bytes.

        Shared by fromrepo/fromconfig, which previously duplicated this
        logic.  Only zlib and zstd expose a tunable level; anything
        else (or an unset level) reports b'default'.
        """
        level = None
        if comp == b'zlib':
            level = ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        # BUG FIX: the previous code used bytes(level); on Python 3
        # bytes(6) yields a 6-byte NUL buffer, not b'6'.  Format the
        # integer explicitly instead.
        return b'%d' % level

    @classmethod
    def fromrepo(cls, repo):
        comp = compressionengine.fromrepo(repo)
        return cls._configuredlevel(repo.ui, comp)

    @classmethod
    def fromconfig(cls, repo):
        comp = compressionengine.fromconfig(repo)
        return cls._configuredlevel(repo.ui, comp)
514
514
515
515
def finddeficiencies(repo):
    """Return the list of format deficiencies the repo suffers from."""
    # Lack of revlogv1/store could be detected here too, but those
    # requirements appeared in 0.9.2 and upgrading repositories without
    # them is unsupported, so we don't bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
529
529
530
530
# Map old (dash-less) optimization names to their current spelling, so
# newer clients still accept the older form.
#
# Debug commands carry no backward-compatibility guarantee, so this map
# might eventually be dropped.  However, needing two different spellings
# in scripts when comparing results is annoying enough to keep the
# compatibility shim around for a while.
legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}
543
543
544
544
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # Every known optimization is listed unconditionally; logic later in
    # the upgrade process figures out which ones actually get applied.
    # Each entry is (name, description, upgrademessage).
    candidates = (
        (
            b're-delta-parent',
            _(
                b'deltas within internal storage will be recalculated to '
                b'choose an optimal base revision where this was not '
                b'already done; the size of the repository may shrink and '
                b'various operations may become faster; the first time '
                b'this optimization is performed could slow down upgrade '
                b'execution considerably; subsequent invocations should '
                b'not run noticeably slower'
            ),
            _(
                b'deltas within internal storage will choose a new '
                b'base revision if needed'
            ),
        ),
        (
            b're-delta-multibase',
            _(
                b'deltas within internal storage will be recalculated '
                b'against multiple base revision and the smallest '
                b'difference will be used; the size of the repository may '
                b'shrink significantly when there are many merges; this '
                b'optimization will slow down execution in proportion to '
                b'the number of merges in the repository and the amount '
                b'of files in the repository; this slow down should not '
                b'be significant unless there are tens of thousands of '
                b'files and thousands of merges'
            ),
            _(
                b'deltas within internal storage will choose an '
                b'optimal delta by computing deltas against multiple '
                b'parents; may slow down execution time '
                b'significantly'
            ),
        ),
        (
            b're-delta-all',
            _(
                b'deltas within internal storage will always be '
                b'recalculated without reusing prior deltas; this will '
                b'likely make execution run several times slower; this '
                b'optimization is typically not needed'
            ),
            _(
                b'deltas within internal storage will be fully '
                b'recomputed; this will likely drastically slow down '
                b'execution time'
            ),
        ),
        (
            b're-delta-fulladd',
            _(
                b'every revision will be re-added as if it was new '
                b'content. It will go through the full storage '
                b'mechanism giving extensions a chance to process it '
                b'(eg. lfs). This is similar to "re-delta-all" but even '
                b'slower since more logic is involved.'
            ),
            _(
                b'each revision will be added as new content to the '
                b'internal storage; this will likely drastically slow '
                b'down execution time, but some extensions might need '
                b'it'
            ),
        ),
    )
    # Build the improvement objects in the same order as the historical
    # append-by-append construction.
    return [
        improvement(
            name=name,
            type=optimisation,
            description=description,
            upgrademessage=upgrademessage,
        )
        for name, description, upgrademessage in candidates
    ]
634
634
635
635
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns the list of retained deficiency objects, preserving their
    original order.
    """
    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    # A deficiency tied to a requirement that does not show up in the
    # destination requirements is pruned; requirement-less deficiencies
    # are always kept.
    return [
        d
        for d in deficiencies
        if d._requirement is None or d._requirement in destreqs
    ]
665
665
666
666
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # everything before "00manifest.i" is the (sub)directory tree
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i")): strip the "data/"
    # prefix and the ".i" suffix to recover the tracked file name
    return filelog.filelog(repo.svfs, path[5:-2])
680
680
681
681
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    src_vfs = oldrl.opener
    dst_vfs = newrl.opener

    # Opening (and immediately closing) the destination index file makes
    # sure all intermediate directories get created.
    with dst_vfs(newrl.indexfile, b'w'):
        pass

    util.copyfile(
        src_vfs.join(oldrl.indexfile), dst_vfs.join(newrl.indexfile)
    )
    has_data = src_vfs.exists(oldrl.datafile)
    if has_data:
        util.copyfile(
            src_vfs.join(oldrl.datafile), dst_vfs.join(newrl.datafile)
        )

    # The changelog and manifest entries are excluded from the fncache;
    # everything else gets registered (data file included when present).
    special = unencodedname.endswith((b'00changelog.i', b'00manifest.i'))
    if not special:
        destrepo.svfs.fncache.add(unencodedname)
        if has_data:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
715
715
716
716
# Sentinels used to select which revlogs should be processed (cloned)
# during an upgrade, as opposed to blindly copied.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOGS = object()

# Default selection: every kind of revlog is processed.
UPGRADE_ALL_REVLOGS = frozenset(
    {UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS}
)
724
724
725
725
def getsidedatacompanion(srcrepo, dstrepo):
    """Return the sidedata companion callable to use for the clone, if any.

    The companion describes how per-revision sidedata must be adjusted
    when sidedata-related requirements differ between ``srcrepo`` and
    ``dstrepo``.  Returns ``None`` when no adjustment is needed.
    """
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements

    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # Sidedata support is being dropped entirely: strip the sidedata
        # from every revision carrying the sidedata flag.
        def stripsidedata(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            flagged = bool(rl.flags(rev) & revlog.REVIDX_SIDEDATA)
            return flagged, (), {}, 0, 0

        return stripsidedata
    if requirements.COPIESSDC_REQUIREMENT in addedreqs:
        return metadata.getsidedataadder(srcrepo, dstrepo)
    if requirements.COPIESSDC_REQUIREMENT in removedreqs:
        return metadata.getsidedataremover(srcrepo, dstrepo)
    return None
743
743
744
744
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    # Map the store entry to its selection sentinel, then test membership.
    if entry.endswith(b'00changelog.i'):
        wanted = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        wanted = UPGRADE_MANIFEST
    else:
        wanted = UPGRADE_FILELOGS
    return wanted in revlogfilter
757
757
758
758
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    Args:
      ui: ui object for status output
      srcrepo: repository to copy revlogs from
      dstrepo: repository to copy revlogs into
      tr: transaction on ``dstrepo`` covering the writes
      deltareuse: one of the ``revlog.DELTAREUSE*`` policies
      forcedeltabothparents: force computing deltas against both parents
      revlogs: set of UPGRADE_* sentinels selecting which revlogs are
        cloned (the others are blindly copied)
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        # data files are handled together with their index file
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # BUGFIX: the exception was previously instantiated without
            # being raised, turning this sanity check into a no-op.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        # reads the latest binding of ``progress`` at call time
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # store.walk yields filelogs first, then manifests, then the
        # changelog; the transitions below report the previous phase's
        # totals and start a fresh progress bar.
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            # selected for upgrade: run a real clone, revision by revision
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            # not selected: copy the underlying files untouched
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
982
982
983
983
984 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
984 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
985 """Determine whether to copy a store file during upgrade.
985 """Determine whether to copy a store file during upgrade.
986
986
987 This function is called when migrating store files from ``srcrepo`` to
987 This function is called when migrating store files from ``srcrepo`` to
988 ``dstrepo`` as part of upgrading a repository.
988 ``dstrepo`` as part of upgrading a repository.
989
989
990 Args:
990 Args:
991 srcrepo: repo we are copying from
991 srcrepo: repo we are copying from
992 dstrepo: repo we are copying to
992 dstrepo: repo we are copying to
993 requirements: set of requirements for ``dstrepo``
993 requirements: set of requirements for ``dstrepo``
994 path: store file being examined
994 path: store file being examined
995 mode: the ``ST_MODE`` file type of ``path``
995 mode: the ``ST_MODE`` file type of ``path``
996 st: ``stat`` data structure for ``path``
996 st: ``stat`` data structure for ``path``
997
997
998 Function should return ``True`` if the file is to be copied.
998 Function should return ``True`` if the file is to be copied.
999 """
999 """
1000 # Skip revlogs.
1000 # Skip revlogs.
1001 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1001 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1002 return False
1002 return False
1003 # Skip transaction related files.
1003 # Skip transaction related files.
1004 if path.startswith(b'undo'):
1004 if path.startswith(b'undo'):
1005 return False
1005 return False
1006 # Only copy regular files.
1006 # Only copy regular files.
1007 if mode != stat.S_IFREG:
1007 if mode != stat.S_IFREG:
1008 return False
1008 return False
1009 # Skip other skipped files.
1009 # Skip other skipped files.
1010 if path in (b'lock', b'fncache'):
1010 if path in (b'lock', b'fncache'):
1011 return False
1011 return False
1012
1012
1013 return True
1013 return True
1014
1014
1015
1015
1016 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1016 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1017 """Hook point for extensions to perform additional actions during upgrade.
1017 """Hook point for extensions to perform additional actions during upgrade.
1018
1018
1019 This function is called after revlogs and store files have been copied but
1019 This function is called after revlogs and store files have been copied but
1020 before the new store is swapped into the original location.
1020 before the new store is swapped into the original location.
1021 """
1021 """
1022
1022
1023
1023
1024 def _upgraderepo(
1024 def _upgraderepo(
1025 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1025 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1026 ):
1026 ):
1027 """Do the low-level work of upgrading a repository.
1027 """Do the low-level work of upgrading a repository.
1028
1028
1029 The upgrade is effectively performed as a copy between a source
1029 The upgrade is effectively performed as a copy between a source
1030 repository and a temporary destination repository.
1030 repository and a temporary destination repository.
1031
1031
1032 The source repository is unmodified for as long as possible so the
1032 The source repository is unmodified for as long as possible so the
1033 upgrade can abort at any time without causing loss of service for
1033 upgrade can abort at any time without causing loss of service for
1034 readers and without corrupting the source repository.
1034 readers and without corrupting the source repository.
1035 """
1035 """
1036 assert srcrepo.currentwlock()
1036 assert srcrepo.currentwlock()
1037 assert dstrepo.currentwlock()
1037 assert dstrepo.currentwlock()
1038
1038
1039 ui.status(
1039 ui.status(
1040 _(
1040 _(
1041 b'(it is safe to interrupt this process any time before '
1041 b'(it is safe to interrupt this process any time before '
1042 b'data migration completes)\n'
1042 b'data migration completes)\n'
1043 )
1043 )
1044 )
1044 )
1045
1045
1046 if b're-delta-all' in actions:
1046 if b're-delta-all' in actions:
1047 deltareuse = revlog.revlog.DELTAREUSENEVER
1047 deltareuse = revlog.revlog.DELTAREUSENEVER
1048 elif b're-delta-parent' in actions:
1048 elif b're-delta-parent' in actions:
1049 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1049 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1050 elif b're-delta-multibase' in actions:
1050 elif b're-delta-multibase' in actions:
1051 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1051 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1052 elif b're-delta-fulladd' in actions:
1052 elif b're-delta-fulladd' in actions:
1053 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1053 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1054 else:
1054 else:
1055 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1055 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1056
1056
1057 with dstrepo.transaction(b'upgrade') as tr:
1057 with dstrepo.transaction(b'upgrade') as tr:
1058 _clonerevlogs(
1058 _clonerevlogs(
1059 ui,
1059 ui,
1060 srcrepo,
1060 srcrepo,
1061 dstrepo,
1061 dstrepo,
1062 tr,
1062 tr,
1063 deltareuse,
1063 deltareuse,
1064 b're-delta-multibase' in actions,
1064 b're-delta-multibase' in actions,
1065 revlogs=revlogs,
1065 revlogs=revlogs,
1066 )
1066 )
1067
1067
1068 # Now copy other files in the store directory.
1068 # Now copy other files in the store directory.
1069 # The sorted() makes execution deterministic.
1069 # The sorted() makes execution deterministic.
1070 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1070 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1071 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1071 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1072 continue
1072 continue
1073
1073
1074 srcrepo.ui.status(_(b'copying %s\n') % p)
1074 srcrepo.ui.status(_(b'copying %s\n') % p)
1075 src = srcrepo.store.rawvfs.join(p)
1075 src = srcrepo.store.rawvfs.join(p)
1076 dst = dstrepo.store.rawvfs.join(p)
1076 dst = dstrepo.store.rawvfs.join(p)
1077 util.copyfile(src, dst, copystat=True)
1077 util.copyfile(src, dst, copystat=True)
1078
1078
1079 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1079 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1080
1080
1081 ui.status(_(b'data fully migrated to temporary repository\n'))
1081 ui.status(_(b'data fully migrated to temporary repository\n'))
1082
1082
1083 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1083 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1084 backupvfs = vfsmod.vfs(backuppath)
1084 backupvfs = vfsmod.vfs(backuppath)
1085
1085
1086 # Make a backup of requires file first, as it is the first to be modified.
1086 # Make a backup of requires file first, as it is the first to be modified.
1087 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1087 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1088
1088
1089 # We install an arbitrary requirement that clients must not support
1089 # We install an arbitrary requirement that clients must not support
1090 # as a mechanism to lock out new clients during the data swap. This is
1090 # as a mechanism to lock out new clients during the data swap. This is
1091 # better than allowing a client to continue while the repository is in
1091 # better than allowing a client to continue while the repository is in
1092 # an inconsistent state.
1092 # an inconsistent state.
1093 ui.status(
1093 ui.status(
1094 _(
1094 _(
1095 b'marking source repository as being upgraded; clients will be '
1095 b'marking source repository as being upgraded; clients will be '
1096 b'unable to read from repository\n'
1096 b'unable to read from repository\n'
1097 )
1097 )
1098 )
1098 )
1099 scmutil.writereporequirements(
1099 scmutil.writereporequirements(
1100 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1100 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1101 )
1101 )
1102
1102
1103 ui.status(_(b'starting in-place swap of repository data\n'))
1103 ui.status(_(b'starting in-place swap of repository data\n'))
1104 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1104 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1105
1105
1106 # Now swap in the new store directory. Doing it as a rename should make
1106 # Now swap in the new store directory. Doing it as a rename should make
1107 # the operation nearly instantaneous and atomic (at least in well-behaved
1107 # the operation nearly instantaneous and atomic (at least in well-behaved
1108 # environments).
1108 # environments).
1109 ui.status(_(b'replacing store...\n'))
1109 ui.status(_(b'replacing store...\n'))
1110 tstart = util.timer()
1110 tstart = util.timer()
1111 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1111 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1112 util.rename(dstrepo.spath, srcrepo.spath)
1112 util.rename(dstrepo.spath, srcrepo.spath)
1113 elapsed = util.timer() - tstart
1113 elapsed = util.timer() - tstart
1114 ui.status(
1114 ui.status(
1115 _(
1115 _(
1116 b'store replacement complete; repository was inconsistent for '
1116 b'store replacement complete; repository was inconsistent for '
1117 b'%0.1fs\n'
1117 b'%0.1fs\n'
1118 )
1118 )
1119 % elapsed
1119 % elapsed
1120 )
1120 )
1121
1121
1122 # We first write the requirements file. Any new requirements will lock
1122 # We first write the requirements file. Any new requirements will lock
1123 # out legacy clients.
1123 # out legacy clients.
1124 ui.status(
1124 ui.status(
1125 _(
1125 _(
1126 b'finalizing requirements file and making repository readable '
1126 b'finalizing requirements file and making repository readable '
1127 b'again\n'
1127 b'again\n'
1128 )
1128 )
1129 )
1129 )
1130 scmutil.writereporequirements(srcrepo, requirements)
1130 scmutil.writereporequirements(srcrepo, requirements)
1131
1131
1132 # The lock file from the old store won't be removed because nothing has a
1132 # The lock file from the old store won't be removed because nothing has a
1133 # reference to its new location. So clean it up manually. Alternatively, we
1133 # reference to its new location. So clean it up manually. Alternatively, we
1134 # could update srcrepo.svfs and other variables to point to the new
1134 # could update srcrepo.svfs and other variables to point to the new
1135 # location. This is simpler.
1135 # location. This is simpler.
1136 backupvfs.unlink(b'store/lock')
1136 backupvfs.unlink(b'store/lock')
1137
1137
1138 return backuppath
1138 return backuppath
1139
1139
1140
1140
def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
):
    """Upgrade a repository in place.

    Validates that the repository's current requirements allow an upgrade,
    computes the target requirements and the set of upgrade/optimization
    actions, and either prints a dry-run report (``run=False``) or performs
    the upgrade by staging data in a temporary repository and swapping it in.

    ``optimize`` is an iterable of optimization action names (legacy names
    are remapped via ``legacy_opts_map``).  ``backup`` controls whether the
    pre-upgrade store is kept on disk afterwards.  ``manifest`` and
    ``changelog`` are tri-state (None/True/False) selectors restricting
    which revlogs get recloned.

    Raises ``error.Abort`` for missing/blocked/unsupported requirements or
    unknown optimization names.
    """
    if optimize is None:
        optimize = []
    # Normalize legacy optimization names to their current spelling.
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    # Operate on the unfiltered repo so hidden revisions are migrated too.
    repo = repo.unfiltered()

    # Translate the (changelog, manifest) tri-state flags into the set of
    # revlog categories to reclone.  Default: everything.
    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = ((b'c', changelog), (b'm', manifest))
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            # At least one flag is True: start empty and add only the
            # explicitly enabled categories.
            revlogs = set()
            for r, enabled in specified:
                if enabled:
                    if r == b'c':
                        revlogs.add(UPGRADE_CHANGELOG)
                    elif r == b'm':
                        revlogs.add(UPGRADE_MANIFEST)
        else:
            # none are enabled
            # All specified flags are False: keep the default set minus
            # the explicitly disabled categories.
            for r, __ in specified:
                if r == b'c':
                    revlogs.discard(UPGRADE_CHANGELOG)
                elif r == b'm':
                    revlogs.discard(UPGRADE_MANIFEST)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    # Refuse to operate on repos carrying requirements we know we cannot
    # handle (e.g. formats this code was not written for).
    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    # Requirements present today that the new config would drop but that we
    # do not know how to remove safely.
    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    # Requirements the new config wants to add but that we do not know how
    # to introduce during an upgrade.
    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    # Target requirements this upgrade code does not support writing at all.
    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    # A partial revlog selection is only honored when the requirements do
    # not change in a way that forces a full reclone.
    if revlogs != UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        # Write a sorted, comma-separated list with a ui label (for color).
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        # Report preserved/removed/added requirement deltas to the user.
        ui.write(_(b'requirements\n'))
        ui.write(_(b'   preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if repo.requirements - newreqs:
            ui.write(_(b'   removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b'   added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        # List the optimisation-type actions that will be performed.
        optimisations = [a for a in actions if a.type == optimisation]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        # One name + message paragraph per planned action.
        for a in actions:
            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies and planned actions, then
        # return without touching the repository.
        fromconfig = []
        onlydefault = []

        # Split deficiencies by whether the user's config (vs. only the
        # defaults) asks for the missing feature.
        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()

        # Advertise optimizations that were available but not requested.
        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    # Hold both the working-copy and store locks for the whole operation so
    # no other process can read or mutate the repo mid-swap.
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                # _upgraderepo migrates the data and swaps the stores; it
                # returns the path of the backup of the old store.
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            # Drop the backup immediately when the caller opted out of
            # keeping one (--no-backup).
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            # Always clean up the staging repository, even on failure.
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            # Tell the user where the old data lives and that it is their
            # job to delete it once satisfied with the upgrade.
            if backuppath and not ui.quiet:
                ui.warn(
                    _(b'copy of old repository backed up at %s\n') % backuppath
                )
                ui.warn(
                    _(
                        b'the old repository will not be deleted; remove '
                        b'it to free up disk space once the upgraded '
                        b'repository is verified\n'
                    )
                )
General Comments 0
You need to be logged in to leave comments. Login now