upgrade: improve documentation of matchrevlog()...
Pulkit Goyal
r46286:d1c10d33 default
@@ -1,1433 +1,1436 @@
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
32 # list of requirements that request a clone of all revlogs if added/removed
32 # list of requirements that request a clone of all revlogs if added/removed
33 RECLONES_REQUIREMENTS = {
33 RECLONES_REQUIREMENTS = {
34 b'generaldelta',
34 b'generaldelta',
35 requirements.SPARSEREVLOG_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
36 }
36 }
37
37
38
38
39 def requiredsourcerequirements(repo):
39 def requiredsourcerequirements(repo):
40 """Obtain requirements required to be present to upgrade a repo.
40 """Obtain requirements required to be present to upgrade a repo.
41
41
42 An upgrade will not be allowed if the repository doesn't have the
42 An upgrade will not be allowed if the repository doesn't have the
43 requirements returned by this function.
43 requirements returned by this function.
44 """
44 """
45 return {
45 return {
46 # Introduced in Mercurial 0.9.2.
46 # Introduced in Mercurial 0.9.2.
47 b'revlogv1',
47 b'revlogv1',
48 # Introduced in Mercurial 0.9.2.
48 # Introduced in Mercurial 0.9.2.
49 b'store',
49 b'store',
50 }
50 }
51
51
52
52
53 def blocksourcerequirements(repo):
53 def blocksourcerequirements(repo):
54 """Obtain requirements that will prevent an upgrade from occurring.
54 """Obtain requirements that will prevent an upgrade from occurring.
55
55
56 An upgrade cannot be performed if the source repository contains a
56 An upgrade cannot be performed if the source repository contains a
57 requirement in the returned set.
57 requirement in the returned set.
58 """
58 """
59 return {
59 return {
60 # The upgrade code does not yet support these experimental features.
60 # The upgrade code does not yet support these experimental features.
61 # This is an artificial limitation.
61 # This is an artificial limitation.
62 requirements.TREEMANIFEST_REQUIREMENT,
62 requirements.TREEMANIFEST_REQUIREMENT,
63 # This was a precursor to generaldelta and was never enabled by default.
63 # This was a precursor to generaldelta and was never enabled by default.
64 # It should (hopefully) not exist in the wild.
64 # It should (hopefully) not exist in the wild.
65 b'parentdelta',
65 b'parentdelta',
66 # Upgrade should operate on the actual store, not the shared link.
66 # Upgrade should operate on the actual store, not the shared link.
67 requirements.SHARED_REQUIREMENT,
67 requirements.SHARED_REQUIREMENT,
68 }
68 }
69
69
70
70
71 def supportremovedrequirements(repo):
71 def supportremovedrequirements(repo):
72 """Obtain requirements that can be removed during an upgrade.
72 """Obtain requirements that can be removed during an upgrade.
73
73
74 If an upgrade were to create a repository that dropped a requirement,
74 If an upgrade were to create a repository that dropped a requirement,
75 the dropped requirement must appear in the returned set for the upgrade
75 the dropped requirement must appear in the returned set for the upgrade
76 to be allowed.
76 to be allowed.
77 """
77 """
78 supported = {
78 supported = {
79 requirements.SPARSEREVLOG_REQUIREMENT,
79 requirements.SPARSEREVLOG_REQUIREMENT,
80 requirements.SIDEDATA_REQUIREMENT,
80 requirements.SIDEDATA_REQUIREMENT,
81 requirements.COPIESSDC_REQUIREMENT,
81 requirements.COPIESSDC_REQUIREMENT,
82 requirements.NODEMAP_REQUIREMENT,
82 requirements.NODEMAP_REQUIREMENT,
83 }
83 }
84 for name in compression.compengines:
84 for name in compression.compengines:
85 engine = compression.compengines[name]
85 engine = compression.compengines[name]
86 if engine.available() and engine.revlogheader():
86 if engine.available() and engine.revlogheader():
87 supported.add(b'exp-compression-%s' % name)
87 supported.add(b'exp-compression-%s' % name)
88 if engine.name() == b'zstd':
88 if engine.name() == b'zstd':
89 supported.add(b'revlog-compression-zstd')
89 supported.add(b'revlog-compression-zstd')
90 return supported
90 return supported
91
91
92
92
93 def supporteddestrequirements(repo):
93 def supporteddestrequirements(repo):
94 """Obtain requirements that upgrade supports in the destination.
94 """Obtain requirements that upgrade supports in the destination.
95
95
96 If the result of the upgrade would create requirements not in this set,
96 If the result of the upgrade would create requirements not in this set,
97 the upgrade is disallowed.
97 the upgrade is disallowed.
98
98
99 Extensions should monkeypatch this to add their custom requirements.
99 Extensions should monkeypatch this to add their custom requirements.
100 """
100 """
101 supported = {
101 supported = {
102 b'dotencode',
102 b'dotencode',
103 b'fncache',
103 b'fncache',
104 b'generaldelta',
104 b'generaldelta',
105 b'revlogv1',
105 b'revlogv1',
106 b'store',
106 b'store',
107 requirements.SPARSEREVLOG_REQUIREMENT,
107 requirements.SPARSEREVLOG_REQUIREMENT,
108 requirements.SIDEDATA_REQUIREMENT,
108 requirements.SIDEDATA_REQUIREMENT,
109 requirements.COPIESSDC_REQUIREMENT,
109 requirements.COPIESSDC_REQUIREMENT,
110 requirements.NODEMAP_REQUIREMENT,
110 requirements.NODEMAP_REQUIREMENT,
111 requirements.SHARESAFE_REQUIREMENT,
111 requirements.SHARESAFE_REQUIREMENT,
112 }
112 }
113 for name in compression.compengines:
113 for name in compression.compengines:
114 engine = compression.compengines[name]
114 engine = compression.compengines[name]
115 if engine.available() and engine.revlogheader():
115 if engine.available() and engine.revlogheader():
116 supported.add(b'exp-compression-%s' % name)
116 supported.add(b'exp-compression-%s' % name)
117 if engine.name() == b'zstd':
117 if engine.name() == b'zstd':
118 supported.add(b'revlog-compression-zstd')
118 supported.add(b'revlog-compression-zstd')
119 return supported
119 return supported
120
120
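# A minimal sketch of the monkeypatching that the docstring of
# supporteddestrequirements() above asks extensions to perform; the
# requirement name b'exp-myextension-store' and the wrapper are hypothetical,
# illustration only (not part of upgrade.py):
#
#     from mercurial import upgrade
#
#     _orig_supporteddest = upgrade.supporteddestrequirements
#
#     def _supporteddestrequirements(repo):
#         reqs = _orig_supporteddest(repo)
#         # advertise the extension's own (hypothetical) requirement
#         reqs.add(b'exp-myextension-store')
#         return reqs
#
#     upgrade.supporteddestrequirements = _supporteddestrequirements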
121
121
122 def allowednewrequirements(repo):
122 def allowednewrequirements(repo):
123 """Obtain requirements that can be added to a repository during upgrade.
123 """Obtain requirements that can be added to a repository during upgrade.
124
124
125 This is used to disallow proposed requirements from being added when
125 This is used to disallow proposed requirements from being added when
126 they weren't present before.
126 they weren't present before.
127
127
128 We use a list of allowed requirement additions instead of a list of known
128 We use a list of allowed requirement additions instead of a list of known
129 bad additions because the whitelist approach is safer and will prevent
129 bad additions because the whitelist approach is safer and will prevent
130 future, unknown requirements from accidentally being added.
130 future, unknown requirements from accidentally being added.
131 """
131 """
132 supported = {
132 supported = {
133 b'dotencode',
133 b'dotencode',
134 b'fncache',
134 b'fncache',
135 b'generaldelta',
135 b'generaldelta',
136 requirements.SPARSEREVLOG_REQUIREMENT,
136 requirements.SPARSEREVLOG_REQUIREMENT,
137 requirements.SIDEDATA_REQUIREMENT,
137 requirements.SIDEDATA_REQUIREMENT,
138 requirements.COPIESSDC_REQUIREMENT,
138 requirements.COPIESSDC_REQUIREMENT,
139 requirements.NODEMAP_REQUIREMENT,
139 requirements.NODEMAP_REQUIREMENT,
140 }
140 }
141 for name in compression.compengines:
141 for name in compression.compengines:
142 engine = compression.compengines[name]
142 engine = compression.compengines[name]
143 if engine.available() and engine.revlogheader():
143 if engine.available() and engine.revlogheader():
144 supported.add(b'exp-compression-%s' % name)
144 supported.add(b'exp-compression-%s' % name)
145 if engine.name() == b'zstd':
145 if engine.name() == b'zstd':
146 supported.add(b'revlog-compression-zstd')
146 supported.add(b'revlog-compression-zstd')
147 return supported
147 return supported
148
148
149
149
150 def preservedrequirements(repo):
150 def preservedrequirements(repo):
151 return set()
151 return set()
152
152
153
153
154 deficiency = b'deficiency'
154 deficiency = b'deficiency'
155 optimisation = b'optimization'
155 optimisation = b'optimization'
156
156
157
157
158 class improvement(object):
158 class improvement(object):
159 """Represents an improvement that can be made as part of an upgrade.
159 """Represents an improvement that can be made as part of an upgrade.
160
160
161 The following attributes are defined on each instance:
161 The following attributes are defined on each instance:
162
162
163 name
163 name
164 Machine-readable string uniquely identifying this improvement. It
164 Machine-readable string uniquely identifying this improvement. It
165 will be mapped to an action later in the upgrade process.
165 will be mapped to an action later in the upgrade process.
166
166
167 type
167 type
168 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
168 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
169 problem. An optimization is an action (sometimes optional) that
169 problem. An optimization is an action (sometimes optional) that
170 can be taken to further improve the state of the repository.
170 can be taken to further improve the state of the repository.
171
171
172 description
172 description
173 Message intended for humans explaining the improvement in more detail,
173 Message intended for humans explaining the improvement in more detail,
174 including the implications of it. For ``deficiency`` types, should be
174 including the implications of it. For ``deficiency`` types, should be
175 worded in the present tense. For ``optimisation`` types, should be
175 worded in the present tense. For ``optimisation`` types, should be
176 worded in the future tense.
176 worded in the future tense.
177
177
178 upgrademessage
178 upgrademessage
179 Message intended for humans explaining what an upgrade addressing this
179 Message intended for humans explaining what an upgrade addressing this
180 issue will do. Should be worded in the future tense.
180 issue will do. Should be worded in the future tense.
181 """
181 """
182
182
183 def __init__(self, name, type, description, upgrademessage):
183 def __init__(self, name, type, description, upgrademessage):
184 self.name = name
184 self.name = name
185 self.type = type
185 self.type = type
186 self.description = description
186 self.description = description
187 self.upgrademessage = upgrademessage
187 self.upgrademessage = upgrademessage
188
188
189 def __eq__(self, other):
189 def __eq__(self, other):
190 if not isinstance(other, improvement):
190 if not isinstance(other, improvement):
191 # This is what Python tells us to do
191 # This is what Python tells us to do
192 return NotImplemented
192 return NotImplemented
193 return self.name == other.name
193 return self.name == other.name
194
194
195 def __ne__(self, other):
195 def __ne__(self, other):
196 return not (self == other)
196 return not (self == other)
197
197
198 def __hash__(self):
198 def __hash__(self):
199 return hash(self.name)
199 return hash(self.name)
200
200
201
201
202 allformatvariant = []
202 allformatvariant = []
203
203
204
204
205 def registerformatvariant(cls):
205 def registerformatvariant(cls):
206 allformatvariant.append(cls)
206 allformatvariant.append(cls)
207 return cls
207 return cls
208
208
209
209
210 class formatvariant(improvement):
210 class formatvariant(improvement):
211 """an improvement subclass dedicated to repository format"""
211 """an improvement subclass dedicated to repository format"""
212
212
213 type = deficiency
213 type = deficiency
214 ### The following attributes should be defined for each class:
214 ### The following attributes should be defined for each class:
215
215
216 # machine-readable string uniquely identifying this improvement. it will be
216 # machine-readable string uniquely identifying this improvement. it will be
217 # mapped to an action later in the upgrade process.
217 # mapped to an action later in the upgrade process.
218 name = None
218 name = None
219
219
220 # message intended for humans explaining the improvement in more detail,
220 # message intended for humans explaining the improvement in more detail,
221 # including the implications of it. For ``deficiency`` types, should be worded
221 # including the implications of it. For ``deficiency`` types, should be worded
222 # in the present tense.
222 # in the present tense.
223 description = None
223 description = None
224
224
225 # message intended for humans explaining what an upgrade addressing this
225 # message intended for humans explaining what an upgrade addressing this
226 # issue will do. should be worded in the future tense.
226 # issue will do. should be worded in the future tense.
227 upgrademessage = None
227 upgrademessage = None
228
228
229 # value of current Mercurial default for new repository
229 # value of current Mercurial default for new repository
230 default = None
230 default = None
231
231
232 def __init__(self):
232 def __init__(self):
233 raise NotImplementedError()
233 raise NotImplementedError()
234
234
235 @staticmethod
235 @staticmethod
236 def fromrepo(repo):
236 def fromrepo(repo):
237 """current value of the variant in the repository"""
237 """current value of the variant in the repository"""
238 raise NotImplementedError()
238 raise NotImplementedError()
239
239
240 @staticmethod
240 @staticmethod
241 def fromconfig(repo):
241 def fromconfig(repo):
242 """current value of the variant in the configuration"""
242 """current value of the variant in the configuration"""
243 raise NotImplementedError()
243 raise NotImplementedError()
244
244
245
245
246 class requirementformatvariant(formatvariant):
246 class requirementformatvariant(formatvariant):
247 """formatvariant based on a 'requirement' name.
247 """formatvariant based on a 'requirement' name.
248
248
249 Many format variants are controlled by a 'requirement'. We define a small
249 Many format variants are controlled by a 'requirement'. We define a small
250 subclass to factor the code.
250 subclass to factor the code.
251 """
251 """
252
252
253 # the requirement that controls this format variant
253 # the requirement that controls this format variant
254 _requirement = None
254 _requirement = None
255
255
256 @staticmethod
256 @staticmethod
257 def _newreporequirements(ui):
257 def _newreporequirements(ui):
258 return localrepo.newreporequirements(
258 return localrepo.newreporequirements(
259 ui, localrepo.defaultcreateopts(ui)
259 ui, localrepo.defaultcreateopts(ui)
260 )
260 )
261
261
262 @classmethod
262 @classmethod
263 def fromrepo(cls, repo):
263 def fromrepo(cls, repo):
264 assert cls._requirement is not None
264 assert cls._requirement is not None
265 return cls._requirement in repo.requirements
265 return cls._requirement in repo.requirements
266
266
267 @classmethod
267 @classmethod
268 def fromconfig(cls, repo):
268 def fromconfig(cls, repo):
269 assert cls._requirement is not None
269 assert cls._requirement is not None
270 return cls._requirement in cls._newreporequirements(repo.ui)
270 return cls._requirement in cls._newreporequirements(repo.ui)
271
271
272
272
273 @registerformatvariant
273 @registerformatvariant
274 class fncache(requirementformatvariant):
274 class fncache(requirementformatvariant):
275 name = b'fncache'
275 name = b'fncache'
276
276
277 _requirement = b'fncache'
277 _requirement = b'fncache'
278
278
279 default = True
279 default = True
280
280
281 description = _(
281 description = _(
282 b'long and reserved filenames may not work correctly; '
282 b'long and reserved filenames may not work correctly; '
283 b'repository performance is sub-optimal'
283 b'repository performance is sub-optimal'
284 )
284 )
285
285
286 upgrademessage = _(
286 upgrademessage = _(
287 b'repository will be more resilient to storing '
287 b'repository will be more resilient to storing '
288 b'certain paths and performance of certain '
288 b'certain paths and performance of certain '
289 b'operations should be improved'
289 b'operations should be improved'
290 )
290 )
291
291
292
292
293 @registerformatvariant
293 @registerformatvariant
294 class dotencode(requirementformatvariant):
294 class dotencode(requirementformatvariant):
295 name = b'dotencode'
295 name = b'dotencode'
296
296
297 _requirement = b'dotencode'
297 _requirement = b'dotencode'
298
298
299 default = True
299 default = True
300
300
301 description = _(
301 description = _(
302 b'storage of filenames beginning with a period or '
302 b'storage of filenames beginning with a period or '
303 b'space may not work correctly'
303 b'space may not work correctly'
304 )
304 )
305
305
306 upgrademessage = _(
306 upgrademessage = _(
307 b'repository will be better able to store files '
307 b'repository will be better able to store files '
308 b'beginning with a space or period'
308 b'beginning with a space or period'
309 )
309 )
310
310
311
311
312 @registerformatvariant
312 @registerformatvariant
313 class generaldelta(requirementformatvariant):
313 class generaldelta(requirementformatvariant):
314 name = b'generaldelta'
314 name = b'generaldelta'
315
315
316 _requirement = b'generaldelta'
316 _requirement = b'generaldelta'
317
317
318 default = True
318 default = True
319
319
320 description = _(
320 description = _(
321 b'deltas within internal storage are unable to '
321 b'deltas within internal storage are unable to '
322 b'choose optimal revisions; repository is larger and '
322 b'choose optimal revisions; repository is larger and '
323 b'slower than it could be; interaction with other '
323 b'slower than it could be; interaction with other '
324 b'repositories may require extra network and CPU '
324 b'repositories may require extra network and CPU '
325 b'resources, making "hg push" and "hg pull" slower'
325 b'resources, making "hg push" and "hg pull" slower'
326 )
326 )
327
327
328 upgrademessage = _(
328 upgrademessage = _(
329 b'repository storage will be able to create '
329 b'repository storage will be able to create '
330 b'optimal deltas; new repository data will be '
330 b'optimal deltas; new repository data will be '
331 b'smaller and read times should decrease; '
331 b'smaller and read times should decrease; '
332 b'interacting with other repositories using this '
332 b'interacting with other repositories using this '
333 b'storage model should require less network and '
333 b'storage model should require less network and '
334 b'CPU resources, making "hg push" and "hg pull" '
334 b'CPU resources, making "hg push" and "hg pull" '
335 b'faster'
335 b'faster'
336 )
336 )
337
337
338
338
339 @registerformatvariant
339 @registerformatvariant
340 class sparserevlog(requirementformatvariant):
340 class sparserevlog(requirementformatvariant):
341 name = b'sparserevlog'
341 name = b'sparserevlog'
342
342
343 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
343 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
344
344
345 default = True
345 default = True
346
346
347 description = _(
347 description = _(
348 b'in order to limit disk reading and memory usage on older '
348 b'in order to limit disk reading and memory usage on older '
349 b'versions, the span of a delta chain from its root to its '
349 b'versions, the span of a delta chain from its root to its '
350 b'end is limited, whatever the relevant data in this span. '
350 b'end is limited, whatever the relevant data in this span. '
351 b'This can severely limit the ability of Mercurial to build '
351 b'This can severely limit the ability of Mercurial to build '
352 b'good delta chains, resulting in much more storage space '
352 b'good delta chains, resulting in much more storage space '
353 b'being taken and limiting the reusability of on-disk deltas '
353 b'being taken and limiting the reusability of on-disk deltas '
354 b'during exchange.'
354 b'during exchange.'
355 )
355 )
356
356
357 upgrademessage = _(
357 upgrademessage = _(
358 b'Revlogs will support delta chains with more unused data '
358 b'Revlogs will support delta chains with more unused data '
359 b'between payloads. These gaps will be skipped at read '
359 b'between payloads. These gaps will be skipped at read '
360 b'time. This allows for better delta chains, better '
360 b'time. This allows for better delta chains, better '
361 b'compression and faster exchange with the server.'
361 b'compression and faster exchange with the server.'
362 )
362 )
363
363
364
364
365 @registerformatvariant
365 @registerformatvariant
366 class sidedata(requirementformatvariant):
366 class sidedata(requirementformatvariant):
367 name = b'sidedata'
367 name = b'sidedata'
368
368
369 _requirement = requirements.SIDEDATA_REQUIREMENT
369 _requirement = requirements.SIDEDATA_REQUIREMENT
370
370
371 default = False
371 default = False
372
372
373 description = _(
373 description = _(
374 b'Allows storage of extra data alongside a revision, '
374 b'Allows storage of extra data alongside a revision, '
375 b'unlocking various caching options.'
375 b'unlocking various caching options.'
376 )
376 )
377
377
378 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
378 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
379
379
380
380
381 @registerformatvariant
381 @registerformatvariant
382 class persistentnodemap(requirementformatvariant):
382 class persistentnodemap(requirementformatvariant):
383 name = b'persistent-nodemap'
383 name = b'persistent-nodemap'
384
384
385 _requirement = requirements.NODEMAP_REQUIREMENT
385 _requirement = requirements.NODEMAP_REQUIREMENT
386
386
387 default = False
387 default = False
388
388
389 description = _(
389 description = _(
390 b'persist the node -> rev mapping on disk to speed up lookup'
390 b'persist the node -> rev mapping on disk to speed up lookup'
391 )
391 )
392
392
393 upgrademessage = _(b'Speed up revision lookup by node id.')
393 upgrademessage = _(b'Speed up revision lookup by node id.')
394
394
395
395
396 @registerformatvariant
396 @registerformatvariant
397 class copiessdc(requirementformatvariant):
397 class copiessdc(requirementformatvariant):
398 name = b'copies-sdc'
398 name = b'copies-sdc'
399
399
400 _requirement = requirements.COPIESSDC_REQUIREMENT
400 _requirement = requirements.COPIESSDC_REQUIREMENT
401
401
402 default = False
402 default = False
403
403
404 description = _(b'Stores copies information alongside changesets.')
404 description = _(b'Stores copies information alongside changesets.')
405
405
406 upgrademessage = _(
406 upgrademessage = _(
407 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
407 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
408 )
408 )
409
409
410
410
411 @registerformatvariant
411 @registerformatvariant
412 class removecldeltachain(formatvariant):
412 class removecldeltachain(formatvariant):
413 name = b'plain-cl-delta'
413 name = b'plain-cl-delta'
414
414
415 default = True
415 default = True
416
416
417 description = _(
417 description = _(
418 b'changelog storage is using deltas instead of '
418 b'changelog storage is using deltas instead of '
419 b'raw entries; changelog reading and any '
419 b'raw entries; changelog reading and any '
420 b'operation relying on changelog data are slower '
420 b'operation relying on changelog data are slower '
421 b'than they could be'
421 b'than they could be'
422 )
422 )
423
423
424 upgrademessage = _(
424 upgrademessage = _(
425 b'changelog storage will be reformatted to '
425 b'changelog storage will be reformatted to '
426 b'store raw entries; changelog reading will be '
426 b'store raw entries; changelog reading will be '
427 b'faster; changelog size may be reduced'
427 b'faster; changelog size may be reduced'
428 )
428 )
429
429
430 @staticmethod
430 @staticmethod
431 def fromrepo(repo):
431 def fromrepo(repo):
432 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
432 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
433 # changelogs with deltas.
433 # changelogs with deltas.
434 cl = repo.changelog
434 cl = repo.changelog
435 chainbase = cl.chainbase
435 chainbase = cl.chainbase
436 return all(rev == chainbase(rev) for rev in cl)
436 return all(rev == chainbase(rev) for rev in cl)
437
437
438 @staticmethod
438 @staticmethod
439 def fromconfig(repo):
439 def fromconfig(repo):
440 return True
440 return True
441
441
442
442
443 @registerformatvariant
443 @registerformatvariant
444 class compressionengine(formatvariant):
444 class compressionengine(formatvariant):
445 name = b'compression'
445 name = b'compression'
446 default = b'zlib'
446 default = b'zlib'
447
447
448 description = _(
448 description = _(
449 b'Compression algorithm used to compress data. '
449 b'Compression algorithm used to compress data. '
450 b'Some engines are faster than others'
450 b'Some engines are faster than others'
451 )
451 )
452
452
453 upgrademessage = _(
453 upgrademessage = _(
454 b'revlog content will be recompressed with the new algorithm.'
454 b'revlog content will be recompressed with the new algorithm.'
455 )
455 )
456
456
457 @classmethod
457 @classmethod
458 def fromrepo(cls, repo):
458 def fromrepo(cls, repo):
459 # we allow multiple compression engine requirements to co-exist because
459 # we allow multiple compression engine requirements to co-exist because
460 # strictly speaking, revlog seems to support mixed compression styles.
460 # strictly speaking, revlog seems to support mixed compression styles.
461 #
461 #
462 # The compression used for new entries will be "the last one"
462 # The compression used for new entries will be "the last one"
463 compression = b'zlib'
463 compression = b'zlib'
464 for req in repo.requirements:
464 for req in repo.requirements:
465 prefix = req.startswith
465 prefix = req.startswith
466 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
466 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
467 compression = req.split(b'-', 2)[2]
467 compression = req.split(b'-', 2)[2]
468 return compression
468 return compression
469
469
470 @classmethod
470 @classmethod
471 def fromconfig(cls, repo):
471 def fromconfig(cls, repo):
472 compengines = repo.ui.configlist(b'format', b'revlog-compression')
472 compengines = repo.ui.configlist(b'format', b'revlog-compression')
473 # return the first valid value as the selection code would do
473 # return the first valid value as the selection code would do
474 for comp in compengines:
474 for comp in compengines:
475 if comp in util.compengines:
475 if comp in util.compengines:
476 return comp
476 return comp
477
477
478 # no valid compression found; let's display them all for clarity
478 # no valid compression found; let's display them all for clarity
479 return b','.join(compengines)
479 return b','.join(compengines)
480
480
481
481
482 @registerformatvariant
482 @registerformatvariant
483 class compressionlevel(formatvariant):
483 class compressionlevel(formatvariant):
484 name = b'compression-level'
484 name = b'compression-level'
485 default = b'default'
485 default = b'default'
486
486
487 description = _(b'compression level')
487 description = _(b'compression level')
488
488
489 upgrademessage = _(b'revlog content will be recompressed')
489 upgrademessage = _(b'revlog content will be recompressed')
490
490
491 @classmethod
491 @classmethod
492 def fromrepo(cls, repo):
492 def fromrepo(cls, repo):
493 comp = compressionengine.fromrepo(repo)
493 comp = compressionengine.fromrepo(repo)
494 level = None
494 level = None
495 if comp == b'zlib':
495 if comp == b'zlib':
496 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
496 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
497 elif comp == b'zstd':
497 elif comp == b'zstd':
498 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
498 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
499 if level is None:
499 if level is None:
500 return b'default'
500 return b'default'
501 return bytes(level)
501 return bytes(level)
502
502
503 @classmethod
503 @classmethod
504 def fromconfig(cls, repo):
504 def fromconfig(cls, repo):
505 comp = compressionengine.fromconfig(repo)
505 comp = compressionengine.fromconfig(repo)
506 level = None
506 level = None
507 if comp == b'zlib':
507 if comp == b'zlib':
508 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
508 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
509 elif comp == b'zstd':
509 elif comp == b'zstd':
510 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
510 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
511 if level is None:
511 if level is None:
512 return b'default'
512 return b'default'
513 return bytes(level)
513 return bytes(level)
514
514
515
515
516 def finddeficiencies(repo):
516 def finddeficiencies(repo):
517 """returns a list of deficiencies that the repo suffer from"""
517 """returns a list of deficiencies that the repo suffer from"""
518 deficiencies = []
518 deficiencies = []
519
519
520 # We could detect lack of revlogv1 and store here, but they were added
520 # We could detect lack of revlogv1 and store here, but they were added
521 # in 0.9.2 and we don't support upgrading repos without these
521 # in 0.9.2 and we don't support upgrading repos without these
522 # requirements, so let's not bother.
522 # requirements, so let's not bother.
523
523
524 for fv in allformatvariant:
524 for fv in allformatvariant:
525 if not fv.fromrepo(repo):
525 if not fv.fromrepo(repo):
526 deficiencies.append(fv)
526 deficiencies.append(fv)
527
527
528 return deficiencies
528 return deficiencies
529
529
530
530
531 # search without '-' to support older form on newer client.
531 # search without '-' to support older form on newer client.
532 #
532 #
533 # We don't enforce backward compatibility for debug commands so this
533 # We don't enforce backward compatibility for debug commands so this
534 # might eventually be dropped. However, having to use two different
534 # might eventually be dropped. However, having to use two different
535 # forms in scripts when comparing results is annoying enough to add
535 # forms in scripts when comparing results is annoying enough to add
536 # backward compatibility for a while.
536 # backward compatibility for a while.
537 legacy_opts_map = {
537 legacy_opts_map = {
538 b'redeltaparent': b're-delta-parent',
538 b'redeltaparent': b're-delta-parent',
539 b'redeltamultibase': b're-delta-multibase',
539 b'redeltamultibase': b're-delta-multibase',
540 b'redeltaall': b're-delta-all',
540 b'redeltaall': b're-delta-all',
541 b'redeltafulladd': b're-delta-fulladd',
541 b'redeltafulladd': b're-delta-fulladd',
542 }
542 }
543
543
544
544
545 def findoptimizations(repo):
545 def findoptimizations(repo):
546 """Determine optimisation that could be used during upgrade"""
546 """Determine optimisation that could be used during upgrade"""
547 # These are unconditionally added. There is logic later that figures out
547 # These are unconditionally added. There is logic later that figures out
548 # which ones to apply.
548 # which ones to apply.
549 optimizations = []
549 optimizations = []
550
550
551 optimizations.append(
551 optimizations.append(
552 improvement(
552 improvement(
553 name=b're-delta-parent',
553 name=b're-delta-parent',
554 type=optimisation,
554 type=optimisation,
555 description=_(
555 description=_(
556 b'deltas within internal storage will be recalculated to '
556 b'deltas within internal storage will be recalculated to '
557 b'choose an optimal base revision where this was not '
557 b'choose an optimal base revision where this was not '
558 b'already done; the size of the repository may shrink and '
558 b'already done; the size of the repository may shrink and '
559 b'various operations may become faster; the first time '
559 b'various operations may become faster; the first time '
560 b'this optimization is performed could slow down upgrade '
560 b'this optimization is performed could slow down upgrade '
561 b'execution considerably; subsequent invocations should '
561 b'execution considerably; subsequent invocations should '
562 b'not run noticeably slower'
562 b'not run noticeably slower'
563 ),
563 ),
564 upgrademessage=_(
564 upgrademessage=_(
565 b'deltas within internal storage will choose a new '
565 b'deltas within internal storage will choose a new '
566 b'base revision if needed'
566 b'base revision if needed'
567 ),
567 ),
568 )
568 )
569 )
569 )
570
570
571 optimizations.append(
571 optimizations.append(
572 improvement(
572 improvement(
573 name=b're-delta-multibase',
573 name=b're-delta-multibase',
574 type=optimisation,
574 type=optimisation,
575 description=_(
575 description=_(
576 b'deltas within internal storage will be recalculated '
576 b'deltas within internal storage will be recalculated '
577 b'against multiple base revision and the smallest '
577 b'against multiple base revision and the smallest '
578 b'difference will be used; the size of the repository may '
578 b'difference will be used; the size of the repository may '
579 b'shrink significantly when there are many merges; this '
579 b'shrink significantly when there are many merges; this '
580 b'optimization will slow down execution in proportion to '
580 b'optimization will slow down execution in proportion to '
581 b'the number of merges in the repository and the amount '
581 b'the number of merges in the repository and the amount '
582 b'of files in the repository; this slow down should not '
582 b'of files in the repository; this slow down should not '
583 b'be significant unless there are tens of thousands of '
583 b'be significant unless there are tens of thousands of '
584 b'files and thousands of merges'
584 b'files and thousands of merges'
585 ),
585 ),
586 upgrademessage=_(
586 upgrademessage=_(
587 b'deltas within internal storage will choose an '
587 b'deltas within internal storage will choose an '
588 b'optimal delta by computing deltas against multiple '
588 b'optimal delta by computing deltas against multiple '
589 b'parents; may slow down execution time '
589 b'parents; may slow down execution time '
590 b'significantly'
590 b'significantly'
591 ),
591 ),
592 )
592 )
593 )
593 )
594
594
595 optimizations.append(
595 optimizations.append(
596 improvement(
596 improvement(
597 name=b're-delta-all',
597 name=b're-delta-all',
598 type=optimisation,
598 type=optimisation,
599 description=_(
599 description=_(
600 b'deltas within internal storage will always be '
600 b'deltas within internal storage will always be '
601 b'recalculated without reusing prior deltas; this will '
601 b'recalculated without reusing prior deltas; this will '
602 b'likely make execution run several times slower; this '
602 b'likely make execution run several times slower; this '
603 b'optimization is typically not needed'
603 b'optimization is typically not needed'
604 ),
604 ),
605 upgrademessage=_(
605 upgrademessage=_(
606 b'deltas within internal storage will be fully '
606 b'deltas within internal storage will be fully '
607 b'recomputed; this will likely drastically slow down '
607 b'recomputed; this will likely drastically slow down '
608 b'execution time'
608 b'execution time'
609 ),
609 ),
610 )
610 )
611 )
611 )
612
612
613 optimizations.append(
613 optimizations.append(
614 improvement(
614 improvement(
615 name=b're-delta-fulladd',
615 name=b're-delta-fulladd',
616 type=optimisation,
616 type=optimisation,
617 description=_(
617 description=_(
618 b'every revision will be re-added as if it was new '
618 b'every revision will be re-added as if it was new '
619 b'content. It will go through the full storage '
619 b'content. It will go through the full storage '
620 b'mechanism giving extensions a chance to process it '
620 b'mechanism giving extensions a chance to process it '
621 b'(eg. lfs). This is similar to "re-delta-all" but even '
621 b'(eg. lfs). This is similar to "re-delta-all" but even '
622 b'slower since more logic is involved.'
622 b'slower since more logic is involved.'
623 ),
623 ),
624 upgrademessage=_(
624 upgrademessage=_(
625 b'each revision will be added as new content to the '
625 b'each revision will be added as new content to the '
626 b'internal storage; this will likely drastically slow '
626 b'internal storage; this will likely drastically slow '
627 b'down execution time, but some extensions might need '
627 b'down execution time, but some extensions might need '
628 b'it'
628 b'it'
629 ),
629 ),
630 )
630 )
631 )
631 )
632
632
633 return optimizations
633 return optimizations
634
634
635
635
636 def determineactions(repo, deficiencies, sourcereqs, destreqs):
636 def determineactions(repo, deficiencies, sourcereqs, destreqs):
637 """Determine upgrade actions that will be performed.
637 """Determine upgrade actions that will be performed.
638
638
639 Given a list of improvements as returned by ``finddeficiencies`` and
639 Given a list of improvements as returned by ``finddeficiencies`` and
640 ``findoptimizations``, determine the list of upgrade actions that
640 ``findoptimizations``, determine the list of upgrade actions that
641 will be performed.
641 will be performed.
642
642
643 The role of this function is to filter improvements if needed, apply
643 The role of this function is to filter improvements if needed, apply
644 recommended optimizations from the improvements list that make sense,
644 recommended optimizations from the improvements list that make sense,
645 etc.
645 etc.
646
646
647 Returns a list of action names.
647 Returns a list of action names.
648 """
648 """
649 newactions = []
649 newactions = []
650
650
651 for d in deficiencies:
651 for d in deficiencies:
652 name = d._requirement
652 name = d._requirement
653
653
654 # If the action is a requirement that doesn't show up in the
654 # If the action is a requirement that doesn't show up in the
655 # destination requirements, prune the action.
655 # destination requirements, prune the action.
656 if name is not None and name not in destreqs:
656 if name is not None and name not in destreqs:
657 continue
657 continue
658
658
659 newactions.append(d)
659 newactions.append(d)
660
660
661 # FUTURE consider adding some optimizations here for certain transitions.
661 # FUTURE consider adding some optimizations here for certain transitions.
662 # e.g. adding generaldelta could schedule parent redeltas.
662 # e.g. adding generaldelta could schedule parent redeltas.
663
663
664 return newactions
664 return newactions
665
665
666
666
667 def _revlogfrompath(repo, path):
667 def _revlogfrompath(repo, path):
668 """Obtain a revlog from a repo path.
668 """Obtain a revlog from a repo path.
669
669
670 An instance of the appropriate class is returned.
670 An instance of the appropriate class is returned.
671 """
671 """
672 if path == b'00changelog.i':
672 if path == b'00changelog.i':
673 return changelog.changelog(repo.svfs)
673 return changelog.changelog(repo.svfs)
674 elif path.endswith(b'00manifest.i'):
674 elif path.endswith(b'00manifest.i'):
675 mandir = path[: -len(b'00manifest.i')]
675 mandir = path[: -len(b'00manifest.i')]
676 return manifest.manifestrevlog(repo.svfs, tree=mandir)
676 return manifest.manifestrevlog(repo.svfs, tree=mandir)
677 else:
677 else:
678 # reverse of "/".join(("data", path + ".i"))
678 # reverse of "/".join(("data", path + ".i"))
679 return filelog.filelog(repo.svfs, path[5:-2])
679 return filelog.filelog(repo.svfs, path[5:-2])
680
680
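# A short sketch of how _revlogfrompath() above maps store paths to revlog
# classes; the file name b'data/foo.txt.i' is a hypothetical example:
#
#     _revlogfrompath(repo, b'00changelog.i')    # -> changelog.changelog
#     _revlogfrompath(repo, b'00manifest.i')     # -> manifest.manifestrevlog
#     _revlogfrompath(repo, b'data/foo.txt.i')   # -> filelog.filelog for 'foo.txt'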
681
681
682 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
682 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
683 """copy all relevant files for `oldrl` into `destrepo` store
683 """copy all relevant files for `oldrl` into `destrepo` store
684
684
685 Files are copied "as is" without any transformation. The copy is performed
685 Files are copied "as is" without any transformation. The copy is performed
686 without extra checks. Callers are responsible for making sure the copied
686 without extra checks. Callers are responsible for making sure the copied
687 content is compatible with the format of the destination repository.
687 content is compatible with the format of the destination repository.
688 """
688 """
689 oldrl = getattr(oldrl, '_revlog', oldrl)
689 oldrl = getattr(oldrl, '_revlog', oldrl)
690 newrl = _revlogfrompath(destrepo, unencodedname)
690 newrl = _revlogfrompath(destrepo, unencodedname)
691 newrl = getattr(newrl, '_revlog', newrl)
691 newrl = getattr(newrl, '_revlog', newrl)
692
692
693 oldvfs = oldrl.opener
693 oldvfs = oldrl.opener
694 newvfs = newrl.opener
694 newvfs = newrl.opener
695 oldindex = oldvfs.join(oldrl.indexfile)
695 oldindex = oldvfs.join(oldrl.indexfile)
696 newindex = newvfs.join(newrl.indexfile)
696 newindex = newvfs.join(newrl.indexfile)
697 olddata = oldvfs.join(oldrl.datafile)
697 olddata = oldvfs.join(oldrl.datafile)
698 newdata = newvfs.join(newrl.datafile)
698 newdata = newvfs.join(newrl.datafile)
699
699
700 with newvfs(newrl.indexfile, b'w'):
700 with newvfs(newrl.indexfile, b'w'):
701 pass # create all the directories
701 pass # create all the directories
702
702
703 util.copyfile(oldindex, newindex)
703 util.copyfile(oldindex, newindex)
704 copydata = oldrl.opener.exists(oldrl.datafile)
704 copydata = oldrl.opener.exists(oldrl.datafile)
705 if copydata:
705 if copydata:
706 util.copyfile(olddata, newdata)
706 util.copyfile(olddata, newdata)
707
707
708 if not (
708 if not (
709 unencodedname.endswith(b'00changelog.i')
709 unencodedname.endswith(b'00changelog.i')
710 or unencodedname.endswith(b'00manifest.i')
710 or unencodedname.endswith(b'00manifest.i')
711 ):
711 ):
712 destrepo.svfs.fncache.add(unencodedname)
712 destrepo.svfs.fncache.add(unencodedname)
713 if copydata:
713 if copydata:
714 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
714 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
715
715
716
716
717 UPGRADE_CHANGELOG = object()
717 UPGRADE_CHANGELOG = object()
718 UPGRADE_MANIFEST = object()
718 UPGRADE_MANIFEST = object()
719 UPGRADE_FILELOG = object()
719 UPGRADE_FILELOG = object()
720
720
721 UPGRADE_ALL_REVLOGS = frozenset(
721 UPGRADE_ALL_REVLOGS = frozenset(
722 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
722 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
723 )
723 )
724
724
725
725
726 def getsidedatacompanion(srcrepo, dstrepo):
726 def getsidedatacompanion(srcrepo, dstrepo):
727 sidedatacompanion = None
727 sidedatacompanion = None
728 removedreqs = srcrepo.requirements - dstrepo.requirements
728 removedreqs = srcrepo.requirements - dstrepo.requirements
729 addedreqs = dstrepo.requirements - srcrepo.requirements
729 addedreqs = dstrepo.requirements - srcrepo.requirements
730 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
730 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
731
731
732 def sidedatacompanion(rl, rev):
732 def sidedatacompanion(rl, rev):
733 rl = getattr(rl, '_revlog', rl)
733 rl = getattr(rl, '_revlog', rl)
734 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
734 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
735 return True, (), {}
735 return True, (), {}
736 return False, (), {}
736 return False, (), {}
737
737
738 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
738 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
739 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
739 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
740 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
740 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
741 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
741 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
742 return sidedatacompanion
742 return sidedatacompanion
743
743
744
744
745 def matchrevlog(revlogfilter, entry):
745 def matchrevlog(revlogfilter, entry):
746 """check is a revlog is selected for cloning
746 """check if a revlog is selected for cloning.
747
748 In other words, are there any updates which need to be done on revlog
749 or it can be blindly copied.
747
750
748 The store entry is checked against the passed filter"""
751 The store entry is checked against the passed filter"""
749 if entry.endswith(b'00changelog.i'):
752 if entry.endswith(b'00changelog.i'):
750 return UPGRADE_CHANGELOG in revlogfilter
753 return UPGRADE_CHANGELOG in revlogfilter
751 elif entry.endswith(b'00manifest.i'):
754 elif entry.endswith(b'00manifest.i'):
752 return UPGRADE_MANIFEST in revlogfilter
755 return UPGRADE_MANIFEST in revlogfilter
753 return UPGRADE_FILELOG in revlogfilter
756 return UPGRADE_FILELOG in revlogfilter
754
757
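# A quick sketch of matchrevlog() in use, following the behaviour documented
# above; the filelog path b'data/foo.txt.i' is a hypothetical example:
#
#     revlogfilter = {UPGRADE_CHANGELOG, UPGRADE_MANIFEST}
#     matchrevlog(revlogfilter, b'00changelog.i')   # True  -> re-clone it
#     matchrevlog(revlogfilter, b'data/foo.txt.i')  # False -> blindly copy it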
755
758
756 def _clonerevlogs(
759 def _clonerevlogs(
757 ui,
760 ui,
758 srcrepo,
761 srcrepo,
759 dstrepo,
762 dstrepo,
760 tr,
763 tr,
761 deltareuse,
764 deltareuse,
762 forcedeltabothparents,
765 forcedeltabothparents,
763 revlogs=UPGRADE_ALL_REVLOGS,
766 revlogs=UPGRADE_ALL_REVLOGS,
764 ):
767 ):
765 """Copy revlogs between 2 repos."""
768 """Copy revlogs between 2 repos."""
766 revcount = 0
769 revcount = 0
767 srcsize = 0
770 srcsize = 0
768 srcrawsize = 0
771 srcrawsize = 0
769 dstsize = 0
772 dstsize = 0
770 fcount = 0
773 fcount = 0
771 frevcount = 0
774 frevcount = 0
772 fsrcsize = 0
775 fsrcsize = 0
773 frawsize = 0
776 frawsize = 0
774 fdstsize = 0
777 fdstsize = 0
775 mcount = 0
778 mcount = 0
776 mrevcount = 0
779 mrevcount = 0
777 msrcsize = 0
780 msrcsize = 0
778 mrawsize = 0
781 mrawsize = 0
779 mdstsize = 0
782 mdstsize = 0
780 crevcount = 0
783 crevcount = 0
781 csrcsize = 0
784 csrcsize = 0
782 crawsize = 0
785 crawsize = 0
783 cdstsize = 0
786 cdstsize = 0
784
787
785 alldatafiles = list(srcrepo.store.walk())
788 alldatafiles = list(srcrepo.store.walk())
786
789
787 # Perform a pass to collect metadata. This validates we can open all
790 # Perform a pass to collect metadata. This validates we can open all
788 # source files and allows a unified progress bar to be displayed.
791 # source files and allows a unified progress bar to be displayed.
789 for unencoded, encoded, size in alldatafiles:
792 for unencoded, encoded, size in alldatafiles:
790 if unencoded.endswith(b'.d'):
793 if unencoded.endswith(b'.d'):
791 continue
794 continue
792
795
793 rl = _revlogfrompath(srcrepo, unencoded)
796 rl = _revlogfrompath(srcrepo, unencoded)
794
797
795 info = rl.storageinfo(
798 info = rl.storageinfo(
796 exclusivefiles=True,
799 exclusivefiles=True,
797 revisionscount=True,
800 revisionscount=True,
798 trackedsize=True,
801 trackedsize=True,
799 storedsize=True,
802 storedsize=True,
800 )
803 )
801
804
802 revcount += info[b'revisionscount'] or 0
805 revcount += info[b'revisionscount'] or 0
803 datasize = info[b'storedsize'] or 0
806 datasize = info[b'storedsize'] or 0
804 rawsize = info[b'trackedsize'] or 0
807 rawsize = info[b'trackedsize'] or 0
805
808
806 srcsize += datasize
809 srcsize += datasize
807 srcrawsize += rawsize
810 srcrawsize += rawsize
808
811
809 # This is for the separate progress bars.
812 # This is for the separate progress bars.
810 if isinstance(rl, changelog.changelog):
813 if isinstance(rl, changelog.changelog):
811 crevcount += len(rl)
814 crevcount += len(rl)
812 csrcsize += datasize
815 csrcsize += datasize
813 crawsize += rawsize
816 crawsize += rawsize
814 elif isinstance(rl, manifest.manifestrevlog):
817 elif isinstance(rl, manifest.manifestrevlog):
815 mcount += 1
818 mcount += 1
816 mrevcount += len(rl)
819 mrevcount += len(rl)
817 msrcsize += datasize
820 msrcsize += datasize
818 mrawsize += rawsize
821 mrawsize += rawsize
819 elif isinstance(rl, filelog.filelog):
822 elif isinstance(rl, filelog.filelog):
820 fcount += 1
823 fcount += 1
821 frevcount += len(rl)
824 frevcount += len(rl)
822 fsrcsize += datasize
825 fsrcsize += datasize
823 frawsize += rawsize
826 frawsize += rawsize
824 else:
827 else:
825 error.ProgrammingError(b'unknown revlog type')
828 error.ProgrammingError(b'unknown revlog type')
826
829
827 if not revcount:
830 if not revcount:
828 return
831 return
829
832
830 ui.status(
833 ui.status(
831 _(
834 _(
832 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
835 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
833 b'%d in changelog)\n'
836 b'%d in changelog)\n'
834 )
837 )
835 % (revcount, frevcount, mrevcount, crevcount)
838 % (revcount, frevcount, mrevcount, crevcount)
836 )
839 )
837 ui.status(
840 ui.status(
838 _(b'migrating %s in store; %s tracked data\n')
841 _(b'migrating %s in store; %s tracked data\n')
839 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
842 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
840 )
843 )
841
844
842 # Used to keep track of progress.
845 # Used to keep track of progress.
843 progress = None
846 progress = None
844
847
845 def oncopiedrevision(rl, rev, node):
848 def oncopiedrevision(rl, rev, node):
846 progress.increment()
849 progress.increment()
847
850
848 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
851 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
849
852
850 # Do the actual copying.
853 # Do the actual copying.
851 # FUTURE this operation can be farmed off to worker processes.
854 # FUTURE this operation can be farmed off to worker processes.
852 seen = set()
855 seen = set()
853 for unencoded, encoded, size in alldatafiles:
856 for unencoded, encoded, size in alldatafiles:
854 if unencoded.endswith(b'.d'):
857 if unencoded.endswith(b'.d'):
855 continue
858 continue
856
859
857 oldrl = _revlogfrompath(srcrepo, unencoded)
860 oldrl = _revlogfrompath(srcrepo, unencoded)
858
861
859 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
862 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
860 ui.status(
863 ui.status(
861 _(
864 _(
862 b'finished migrating %d manifest revisions across %d '
865 b'finished migrating %d manifest revisions across %d '
863 b'manifests; change in size: %s\n'
866 b'manifests; change in size: %s\n'
864 )
867 )
865 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
868 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
866 )
869 )
867
870
868 ui.status(
871 ui.status(
869 _(
872 _(
870 b'migrating changelog containing %d revisions '
873 b'migrating changelog containing %d revisions '
871 b'(%s in store; %s tracked data)\n'
874 b'(%s in store; %s tracked data)\n'
872 )
875 )
873 % (
876 % (
874 crevcount,
877 crevcount,
875 util.bytecount(csrcsize),
878 util.bytecount(csrcsize),
876 util.bytecount(crawsize),
879 util.bytecount(crawsize),
877 )
880 )
878 )
881 )
879 seen.add(b'c')
882 seen.add(b'c')
880 progress = srcrepo.ui.makeprogress(
883 progress = srcrepo.ui.makeprogress(
881 _(b'changelog revisions'), total=crevcount
884 _(b'changelog revisions'), total=crevcount
882 )
885 )
883 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
886 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
884 ui.status(
887 ui.status(
885 _(
888 _(
886 b'finished migrating %d filelog revisions across %d '
889 b'finished migrating %d filelog revisions across %d '
887 b'filelogs; change in size: %s\n'
890 b'filelogs; change in size: %s\n'
888 )
891 )
889 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
892 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
890 )
893 )
891
894
892 ui.status(
895 ui.status(
893 _(
896 _(
894 b'migrating %d manifests containing %d revisions '
897 b'migrating %d manifests containing %d revisions '
895 b'(%s in store; %s tracked data)\n'
898 b'(%s in store; %s tracked data)\n'
896 )
899 )
897 % (
900 % (
898 mcount,
901 mcount,
899 mrevcount,
902 mrevcount,
900 util.bytecount(msrcsize),
903 util.bytecount(msrcsize),
901 util.bytecount(mrawsize),
904 util.bytecount(mrawsize),
902 )
905 )
903 )
906 )
904 seen.add(b'm')
907 seen.add(b'm')
905 if progress:
908 if progress:
906 progress.complete()
909 progress.complete()
907 progress = srcrepo.ui.makeprogress(
910 progress = srcrepo.ui.makeprogress(
908 _(b'manifest revisions'), total=mrevcount
911 _(b'manifest revisions'), total=mrevcount
909 )
912 )
910 elif b'f' not in seen:
913 elif b'f' not in seen:
911 ui.status(
914 ui.status(
912 _(
915 _(
913 b'migrating %d filelogs containing %d revisions '
916 b'migrating %d filelogs containing %d revisions '
914 b'(%s in store; %s tracked data)\n'
917 b'(%s in store; %s tracked data)\n'
915 )
918 )
916 % (
919 % (
917 fcount,
920 fcount,
918 frevcount,
921 frevcount,
919 util.bytecount(fsrcsize),
922 util.bytecount(fsrcsize),
920 util.bytecount(frawsize),
923 util.bytecount(frawsize),
921 )
924 )
922 )
925 )
923 seen.add(b'f')
926 seen.add(b'f')
924 if progress:
927 if progress:
925 progress.complete()
928 progress.complete()
926 progress = srcrepo.ui.makeprogress(
929 progress = srcrepo.ui.makeprogress(
927 _(b'file revisions'), total=frevcount
930 _(b'file revisions'), total=frevcount
928 )
931 )
929
932
930 if matchrevlog(revlogs, unencoded):
933 if matchrevlog(revlogs, unencoded):
931 ui.note(
934 ui.note(
932 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
935 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
933 )
936 )
934 newrl = _revlogfrompath(dstrepo, unencoded)
937 newrl = _revlogfrompath(dstrepo, unencoded)
935 oldrl.clone(
938 oldrl.clone(
936 tr,
939 tr,
937 newrl,
940 newrl,
938 addrevisioncb=oncopiedrevision,
941 addrevisioncb=oncopiedrevision,
939 deltareuse=deltareuse,
942 deltareuse=deltareuse,
940 forcedeltabothparents=forcedeltabothparents,
943 forcedeltabothparents=forcedeltabothparents,
941 sidedatacompanion=sidedatacompanion,
944 sidedatacompanion=sidedatacompanion,
942 )
945 )
943 else:
946 else:
944 msg = _(b'blindly copying %s containing %i revisions\n')
947 msg = _(b'blindly copying %s containing %i revisions\n')
945 ui.note(msg % (unencoded, len(oldrl)))
948 ui.note(msg % (unencoded, len(oldrl)))
946 _copyrevlog(tr, dstrepo, oldrl, unencoded)
949 _copyrevlog(tr, dstrepo, oldrl, unencoded)
947
950
948 newrl = _revlogfrompath(dstrepo, unencoded)
951 newrl = _revlogfrompath(dstrepo, unencoded)
949
952
950 info = newrl.storageinfo(storedsize=True)
953 info = newrl.storageinfo(storedsize=True)
951 datasize = info[b'storedsize'] or 0
954 datasize = info[b'storedsize'] or 0
952
955
953 dstsize += datasize
956 dstsize += datasize
954
957
955 if isinstance(newrl, changelog.changelog):
958 if isinstance(newrl, changelog.changelog):
956 cdstsize += datasize
959 cdstsize += datasize
957 elif isinstance(newrl, manifest.manifestrevlog):
960 elif isinstance(newrl, manifest.manifestrevlog):
958 mdstsize += datasize
961 mdstsize += datasize
959 else:
962 else:
960 fdstsize += datasize
963 fdstsize += datasize
961
964
962 progress.complete()
965 progress.complete()
963
966
964 ui.status(
967 ui.status(
965 _(
968 _(
966 b'finished migrating %d changelog revisions; change in size: '
969 b'finished migrating %d changelog revisions; change in size: '
967 b'%s\n'
970 b'%s\n'
968 )
971 )
969 % (crevcount, util.bytecount(cdstsize - csrcsize))
972 % (crevcount, util.bytecount(cdstsize - csrcsize))
970 )
973 )
971
974
972 ui.status(
975 ui.status(
973 _(
976 _(
974 b'finished migrating %d total revisions; total change in store '
977 b'finished migrating %d total revisions; total change in store '
975 b'size: %s\n'
978 b'size: %s\n'
976 )
979 )
977 % (revcount, util.bytecount(dstsize - srcsize))
980 % (revcount, util.bytecount(dstsize - srcsize))
978 )
981 )
979
982
980
983
def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
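    # Illustrative sketch of the rules below (not an exhaustive list): a
    # store file such as b'phaseroots' passes every check and is copied,
    # while b'00changelog.i' (a revlog), b'undo.backupfiles' (transaction
    # related), b'lock' and b'fncache' are all skipped.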
    # Skip revlogs.
    if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
        return False
    # Skip transaction related files.
    if path.startswith(b'undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in (b'lock', b'fncache'):
        return False

    return True


def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """


def _upgraderepo(
    ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

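    # The optimization actions chosen by the caller translate into one of
    # revlog.clone()'s delta reuse policies. Roughly (see revlog.clone for
    # the authoritative description): DELTAREUSENEVER recomputes every delta,
    # DELTAREUSESAMEREVS only reuses deltas whose base revision is unchanged,
    # DELTAREUSEFULLADD re-adds each revision through the normal full-text
    # path, and DELTAREUSEALWAYS copies existing deltas whenever possible.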
    if b're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif b're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

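    # The revlog cloning below happens inside a single transaction on the
    # temporary destination repository, so an interruption at this point only
    # discards work in ``dstrepo``; the source store is not written to here.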
    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            b're-delta-multibase' in actions,
            revlogs=revlogs,
        )

    # Now copy other files in the store directory.
    # The sorted() makes execution deterministic.
    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
            continue

        srcrepo.ui.status(_(b'copying %s\n') % p)
        src = srcrepo.store.rawvfs.join(p)
        dst = dstrepo.store.rawvfs.join(p)
        util.copyfile(src, dst, copystat=True)

    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )
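    # b'upgradeinprogress' is not a requirement any client supports, so a
    # reader opening the repository while the swap is underway aborts with an
    # unknown-requirement error instead of seeing a half-migrated store; the
    # real requirements are written back once the swap finishes below.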

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath


def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
):
    """Upgrade a repository in place."""
    if optimize is None:
        optimize = []
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
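    # legacy_opts_map (defined earlier in this module) is assumed here to map
    # the old un-dashed optimization names such as b'redeltaall' to their
    # dashed b're-delta-all' style equivalents; names it does not know pass
    # through unchanged and are rejected as unknown further down.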
    repo = repo.unfiltered()

    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = ((b'c', changelog), (b'm', manifest))
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            revlogs = set()
            for r, enabled in specified:
                if enabled:
                    if r == b'c':
                        revlogs.add(UPGRADE_CHANGELOG)
                    elif r == b'm':
                        revlogs.add(UPGRADE_MANIFEST)
        else:
            # none are enabled
            for r, __ in specified:
                if r == b'c':
                    revlogs.discard(UPGRADE_CHANGELOG)
                elif r == b'm':
                    revlogs.discard(UPGRADE_MANIFEST)
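    # A sketch of the resulting selection, assuming ``changelog`` and
    # ``manifest`` are fed straight from the corresponding command-line
    # flags: changelog=True alone narrows ``revlogs`` to {UPGRADE_CHANGELOG},
    # while changelog=False and manifest=False keep only whatever else is in
    # UPGRADE_ALL_REVLOGS (i.e. the filelogs).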

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

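    # Requirements in RECLONES_REQUIREMENTS change how every revlog is
    # stored, so when one of them is added or removed a partial reclone would
    # leave the store inconsistent; in that case the per-revlog selection is
    # dropped and everything is recloned.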
    if revlogs != UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        ui.write(_(b'requirements\n'))
        ui.write(_(b'   preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if repo.requirements - newreqs:
            ui.write(_(b'   removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b'   added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        optimisations = [a for a in actions if a.type == optimisation]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        for a in actions:
            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath and not ui.quiet:
                ui.warn(
                    _(b'copy of old repository backed up at %s\n') % backuppath
                )
                ui.warn(
                    _(
                        b'the old repository will not be deleted; remove '
                        b'it to free up disk space once the upgraded '
                        b'repository is verified\n'
                    )
                )