##// END OF EJS Templates
engine: prevent multiple checking of re-delta-multibase...
Pulkit Goyal -
r46835:3f92a9bb default
parent child Browse files
Show More
@@ -1,899 +1,904 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 requirements,
14 requirements,
15 revlog,
15 revlog,
16 util,
16 util,
17 )
17 )
18
18
19 from ..utils import compression
19 from ..utils import compression
20
20
# Requirements whose addition or removal forces a re-clone of every revlog.
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
26
26
27
27
def preservedrequirements(repo):
    """Return the set of requirements to carry over unchanged.

    No requirement is unconditionally preserved at the moment, hence the
    empty set.
    """
    return set()
30
30
31
31
# Classification labels for ``improvement`` instances: a format variant
# changes the on-disk storage format, an optimization is an (often optional)
# action improving the repository state.
FORMAT_VARIANT = b'deficiency'
OPTIMISATION = b'optimization'
34
34
35
35
class improvement(object):
    """One improvement that an in-place upgrade can apply to a repository.

    Instances carry the following attributes:

    name
        Machine-readable string uniquely identifying this improvement; it
        is mapped to an action later in the upgrade process.  Two
        improvements compare equal iff their names match.

    type
        Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.  A format variant
        changes the storage format (not necessarily a problem); an
        optimization is an action, sometimes optional, further improving
        the repository state.

    description
        Human-readable explanation of the improvement and its
        implications.  Present tense for ``FORMAT_VARIANT``, future tense
        for ``OPTIMISATION``.

    upgrademessage
        Human-readable message describing what an upgrade addressing this
        issue will do (future tense).

    postupgrademessage
        Message shown to humans after an upgrade that added this
        improvement (``None`` when there is nothing to say).

    postdowngrademessage
        Message shown to humans after an upgrade that removed this
        improvement (``None`` when there is nothing to say).
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage
        # post-operation messages default to "nothing to report"
        self.postupgrademessage = None
        self.postdowngrademessage = None

    def __eq__(self, other):
        if not isinstance(other, improvement):
            # let Python try the reflected operation
            return NotImplemented
        # identity is fully determined by the name
        return self.name == other.name

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # consistent with __eq__: hash on the name only
        return hash(self.name)
90
90
91
91
# Registry of every known format variant class, populated through the
# ``registerformatvariant`` class decorator below.
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator recording ``cls`` in the format variant registry."""
    allformatvariant.append(cls)
    return cls
98
98
99
99
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    type = FORMAT_VARIANT

    ### Subclasses are expected to define all of the attributes below:

    # machine-readable string uniquely identifying this improvement; it is
    # mapped to an action later in the upgrade process
    name = None

    # human-readable message explaining the improvement in detail,
    # including its implications; ``FORMAT_VARIANT`` types are worded in
    # the present tense
    description = None

    # human-readable message explaining what an upgrade addressing this
    # issue will do; worded in the future tense
    upgrademessage = None

    # value of the current Mercurial default for new repositories
    default = None

    # message shown to humans after an upgrade that added this improvement
    postupgrademessage = None

    # message shown to humans after an upgrade that removed this
    # improvement
    postdowngrademessage = None

    def __init__(self):
        # format variant classes are used directly, never instantiated
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
143
143
144
144
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variants are controlled by a 'requirement'; this small
    subclass factors out the shared logic.
    """

    # the requirement that controls this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # requirements a brand-new repository would be created with
        createopts = localrepo.defaultcreateopts(ui)
        return localrepo.newreporequirements(ui, createopts)

    @classmethod
    def fromrepo(cls, repo):
        """True when the repository carries the controlling requirement."""
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        """True when a fresh repository would carry the requirement."""
        assert cls._requirement is not None
        newreqs = cls._newreporequirements(repo.ui)
        return cls._requirement in newreqs
170
170
171
171
@registerformatvariant
class fncache(requirementformatvariant):
    # controlled by the 'fncache' store requirement; on by default
    name = b'fncache'
    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
190
190
191
191
@registerformatvariant
class dotencode(requirementformatvariant):
    # controlled by the 'dotencode' store requirement; on by default
    name = b'dotencode'
    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
209
209
210
210
@registerformatvariant
class generaldelta(requirementformatvariant):
    # controlled by the 'generaldelta' requirement; on by default
    name = b'generaldelta'
    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
236
236
237
237
@registerformatvariant
class sharesafe(requirementformatvariant):
    # experimental share-safe mode; off by default
    name = b'exp-sharesafe'
    _requirement = requirements.SHARESAFE_REQUIREMENT

    default = False

    description = _(
        b'old shared repositories do not share source repository '
        b'requirements and config. This leads to various problems '
        b'when the source repository format is upgraded or some new '
        b'extensions are enabled.'
    )

    upgrademessage = _(
        b'Upgrades a repository to share-safe format so that future '
        b'shares of this repository share its requirements and configs.'
    )

    # existing shares are invalidated by a downgrade ...
    postdowngrademessage = _(
        b'repository downgraded to not use share safe mode, '
        b'existing shares will not work and needs to'
        b' be reshared.'
    )

    # ... while an upgrade merely leaves them in the old mode
    postupgrademessage = _(
        b'repository upgraded to share safe mode, existing'
        b' shares will still work in old non-safe mode. '
        b'Re-share existing shares to use them in safe mode'
        b' New shares will be created in safe mode.'
    )
269
269
270
270
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # controlled by the sparse-revlog requirement; on by default
    name = b'sparserevlog'
    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
295
295
296
296
@registerformatvariant
class sidedata(requirementformatvariant):
    # controlled by the side-data requirement; off by default
    name = b'sidedata'
    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
311
311
312
312
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # controlled by the nodemap requirement; off by default
    name = b'persistent-nodemap'
    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
326
326
327
327
@registerformatvariant
class copiessdc(requirementformatvariant):
    # copies stored as changeset side-data; off by default
    name = b'copies-sdc'
    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with '
        b'copy tracing.'
    )
341
341
342
342
@registerformatvariant
class removecldeltachain(formatvariant):
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 stopped using delta chains in the changelog.  A
        # revision stored as a raw entry is its own chain base, so report
        # True only when no changelog revision is stored as a delta.
        cl = repo.changelog
        chainbase = cl.chainbase
        for rev in cl:
            if chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # new changelogs never use delta chains
        return True
373
373
374
374
@registerformatvariant
class compressionengine(formatvariant):
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        """Return the engine new revlog entries are compressed with."""
        # Multiple compression engine requirements may co-exist because,
        # strictly speaking, revlogs seem to support mixed compression
        # styles.  New entries use the engine of the last matching
        # requirement encountered.
        engine = b'zlib'
        for req in repo.requirements:
            if req.startswith((b'revlog-compression-', b'exp-compression-')):
                engine = req.split(b'-', 2)[2]
        return engine

    @classmethod
    def fromconfig(cls, repo):
        """Return the engine a fresh repository would be configured with."""
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value, as the selection code would do
        for comp in compengines:
            if comp in util.compengines:
                return comp

        # no valid compression found; display everything for clarity
        return b','.join(compengines)
412
412
413
413
@registerformatvariant
class compressionlevel(formatvariant):
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @staticmethod
    def _configured_level(repo, comp):
        """Return the configured level for engine ``comp``, or None.

        Only zlib and zstd expose a configurable revlog compression
        level; any other engine yields None ("default")."""
        if comp == b'zlib':
            return repo.ui.configint(b'storage', b'revlog.zlib.level')
        if comp == b'zstd':
            return repo.ui.configint(b'storage', b'revlog.zstd.level')
        return None

    @classmethod
    def fromrepo(cls, repo):
        """Level for the engine currently used by the repository."""
        comp = compressionengine.fromrepo(repo)
        level = cls._configured_level(repo, comp)
        if level is None:
            return b'default'
        # Format the integer explicitly: ``bytes(level)`` is wrong on
        # Python 3, where it produces a NUL-filled buffer of ``level``
        # bytes instead of the decimal digits.
        return b'%d' % level

    @classmethod
    def fromconfig(cls, repo):
        """Level for the engine a fresh repository would use."""
        comp = compressionengine.fromconfig(repo)
        level = cls._configured_level(repo, comp)
        if level is None:
            return b'default'
        return b'%d' % level
446
446
447
447
def find_format_upgrades(repo):
    """returns a list of format upgrades which can be perform on the repo"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
461
461
462
462
def find_format_downgrades(repo):
    """returns a list of format downgrades which will be performed on the repo
    because of disabled config option for them"""
    # a variant is downgraded when the repository has it but a freshly
    # created repository (per the current config) would not
    return [
        fv
        for fv in allformatvariant
        if fv.fromrepo(repo) and not fv.fromconfig(repo)
    ]
476
476
477
477
# Registry of every known optimization, populated through
# ``register_optimization``.
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    """Record ``obj`` in the optimization registry and return it."""
    ALL_OPTIMISATIONS.append(obj)
    return obj
484
484
485
485
# The known delta-recomputation optimizations.  Each entry is a
# (name, description, upgrademessage) triple; they are registered in order.
for _opt_name, _opt_description, _opt_upgrademessage in [
    (
        b're-delta-parent',
        _(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        _(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    ),
    (
        b're-delta-multibase',
        _(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revision and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the amount '
            b'of files in the repository; this slow down should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        _(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    ),
    (
        b're-delta-all',
        _(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        _(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    ),
    (
        b're-delta-fulladd',
        _(
            b'every revision will be re-added as if it was new '
            b'content. It will go through the full storage '
            b'mechanism giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        _(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    ),
]:
    register_optimization(
        improvement(
            name=_opt_name,
            type=OPTIMISATION,
            description=_opt_description,
            upgrademessage=_opt_upgrademessage,
        )
    )

# keep the module namespace clean of the loop variables
del _opt_name, _opt_description, _opt_upgrademessage
567
567
568
568
def findoptimizations(repo):
    """Return the list of optimizations an upgrade could apply.

    Every known optimization is returned unconditionally; logic further
    down the upgrade pipeline decides which ones to actually apply.
    """
    known = list(ALL_OPTIMISATIONS)
    return known
574
574
575
575
def determine_upgrade_actions(
    repo, format_upgrades, optimizations, sourcereqs, destreqs
):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``find_format_upgrades`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of improvement objects (not just their names): the
    retained format upgrades first, followed by the requested
    optimizations, sorted and de-duplicated against the former.
    """
    newactions = []

    for d in format_upgrades:
        name = d._requirement

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name is not None and name not in destreqs:
            continue

        newactions.append(d)

    # list.extend appends items one at a time while consuming the
    # generator, so the `o not in newactions` check also de-duplicates
    # within `optimizations` itself.
    newactions.extend(o for o in sorted(optimizations) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions
609
609
610
610
class UpgradeOperation(object):
    """represent the work to be done during an upgrade

    Precomputes the requirement deltas (added/removed/preserved), the
    delta-reuse policy, and whether re-delta against both parents must be
    forced, so per-revlog code does not re-derive them.
    """

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        upgrade_actions,
        removed_actions,
        revlogs_to_process,
    ):
        self.ui = ui
        self.new_requirements = new_requirements
        self.current_requirements = current_requirements
        # list of upgrade actions the operation will perform
        self.upgrade_actions = upgrade_actions
        # fast name lookup (set comprehension instead of set([...]))
        self._upgrade_actions_names = {a.name for a in upgrade_actions}
        self.removed_actions = removed_actions
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = (
            self.new_requirements - self.current_requirements
        )
        # requirements which will be removed by the operation
        self._removed_requirements = (
            self.current_requirements - self.new_requirements
        )
        # requirements which will be preserved by the operation
        self._preserved_requirements = (
            self.current_requirements & self.new_requirements
        )
        # optimizations which are not used and it's recommended that they
        # should use them
        all_optimizations = findoptimizations(None)
        self.unused_optimizations = [
            i for i in all_optimizations if i not in self.upgrade_actions
        ]

        # delta reuse mode of this upgrade operation
        self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
        if b're-delta-all' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
        elif b're-delta-parent' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-multibase' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
        elif b're-delta-fulladd' in self._upgrade_actions_names:
            self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD

        # should this operation force re-delta of both parents
        self.force_re_delta_both_parents = (
            b're-delta-multibase' in self._upgrade_actions_names
        )

    def _write_labeled(self, l, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for r in sorted(l):
            if not first:
                self.ui.write(b', ')
            self.ui.write(r, label=label)
            first = False

    def print_requirements(self):
        """write the preserved/removed/added requirement summary"""
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b'   preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        if self._removed_requirements:
            self.ui.write(_(b'   removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b'   added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        """write the sorted list of optimisations that will be performed"""
        optimisations = [
            a for a in self.upgrade_actions if a.type == OPTIMISATION
        ]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        """write each action's name and its user-facing upgrade message"""
        for a in self.upgrade_actions:
            self.ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        """write which revlog categories this operation will process"""
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b'  - %s\n' % r))
        self.ui.write((b'\n'))

    def print_unused_optimizations(self):
        """describe known optimizations that were not requested"""
        for i in self.unused_optimizations:
            self.ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))

    def has_upgrade_action(self, name):
        """ Check whether the upgrade operation will perform this action """
        return name in self._upgrade_actions_names

    def print_post_op_messages(self):
        """ print post upgrade operation warning messages """
        for a in self.upgrade_actions:
            if a.postupgrademessage is not None:
                self.ui.warn(b'%s\n' % a.postupgrademessage)
        for a in self.removed_actions:
            if a.postdowngrademessage is not None:
                self.ui.warn(b'%s\n' % a.postdowngrademessage)
735
740
736
741
737 ### Code checking if a repository can got through the upgrade process at all. #
742 ### Code checking if a repository can got through the upgrade process at all. #
738
743
739
744
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2, so any
    # repository a modern client can read will have them.
    return {b'revlogv1', b'store'}
752
757
753
758
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
770
775
771
776
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""

    # every required requirement must be present in the source
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missingreqs)))

    # none of the blocking requirements may be present in the source
    blockingreqs = blocksourcerequirements(repo) & repo.requirements
    if blockingreqs:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(m % b', '.join(sorted(blockingreqs)))
788
793
789
794
790 ### Verify the validity of the planned requirement changes ####################
795 ### Verify the validity of the planned requirement changes ####################
791
796
792
797
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # a compression requirement is droppable when the matching engine is
    # usable in this installation
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
814
819
815
820
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # also accept any compression requirement this installation can handle
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
843
848
844
849
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # compression requirements may be added when the engine is usable here
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
872
877
873
878
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement transition implied by an upgrade.

    Aborts if moving from ``repo.requirements`` to ``new_reqs`` would
    remove a requirement we cannot drop, add one we do not support adding,
    or end up with a destination requirement we do not support at all.
    """
    old_reqs = repo.requirements

    # requirements that would disappear but are not droppable
    no_remove_reqs = old_reqs - new_reqs - supportremovedrequirements(repo)
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        raise error.Abort(msg % b', '.join(sorted(no_remove_reqs)))

    # requirements that would appear but are not on the addition whitelist
    no_add_reqs = new_reqs - old_reqs - allowednewrequirements(repo)
    if no_add_reqs:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        raise error.Abort(m + b', '.join(sorted(no_add_reqs)))

    # the final requirement set must be fully supported
    unsupported_reqs = new_reqs - supporteddestrequirements(repo)
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        raise error.Abort(msg % b', '.join(sorted(unsupported_reqs)))
@@ -1,522 +1,520 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from ..i18n import _
12 from ..i18n import _
13 from ..pycompat import getattr
13 from ..pycompat import getattr
14 from .. import (
14 from .. import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 manifest,
18 manifest,
19 metadata,
19 metadata,
20 pycompat,
20 pycompat,
21 requirements,
21 requirements,
22 revlog,
22 revlog,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 )
26 )
27
27
28
28
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i")): strip the leading
    # b'data/' prefix and the trailing b'.i' suffix
    return filelog.filelog(repo.svfs, path[5:-2])
42
42
43
43
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # unwrap filelog/manifestlog-style wrappers to reach the raw revlog
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    # resolve the on-disk paths of the index and data files on both sides
    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass # create all the directories

    util.copyfile(oldindex, newindex)
    # a separate data file only exists when the revlog is not inline
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        # unlike changelog/manifest, filelogs are tracked in fncache, so
        # register the copied index (and data file, if any) there
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77
77
78
78
# Symbolic names for the revlog categories an upgrade may process.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Convenience constant meaning "process every revlog category".
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)
86
86
87
87
def getsidedatacompanion(srcrepo, dstrepo):
    """Return a sidedata companion callable for the revlog clone, or None.

    The companion is passed to ``revlog.clone`` (see ``_perform_clone``) to
    adjust per-revision sidedata when the source and destination formats
    differ in their sidedata-related requirements.
    """
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
        # destination drops sidedata support entirely: report an update
        # with empty sidedata for revisions that carried the flag
        # (presumably interpreted as removal by clone — confirm against
        # revlog.clone's sidedatacompanion contract)

        def sidedatacompanion(rl, rev):
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        # destination starts storing copy metadata in sidedata
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        # destination stops storing copy metadata in sidedata
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
105
105
106
106
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    # map the store entry to its revlog category, then test the filter
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
119
119
120
120
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    upgrade_op,
    sidedatacompanion,
    oncopiedrevision,
):
    """ returns the new revlog object created"""
    newrl = None
    if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
        # this revlog needs conversion: clone it revision by revision
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            # precomputed once on the operation so the 're-delta-multibase'
            # action is not re-checked for every revlog
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedatacompanion=sidedatacompanion,
        )
    else:
        # no conversion needed: copy the files verbatim
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)

        newrl = _revlogfrompath(dstrepo, unencoded)
    return newrl
155
153
156
154
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    upgrade_op,
):
    """Copy revlogs between 2 repos.

    Walks every revlog in ``srcrepo``'s store, collects size/revision
    statistics in a first pass, then migrates filelogs, manifests and
    finally the changelog into ``dstrepo`` (cloning or blindly copying
    each one according to ``upgrade_op.revlogs_to_process``), reporting
    progress and size deltas along the way.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())
    # mapping of data files which needs to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        # Only look at index files; the matching .d data file (if any) is
        # handled together with its index.
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            changelogs[unencoded] = rl
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            manifests[unencoded] = rl
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            filelogs[unencoded] = rl
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # BUG FIX: the exception was previously instantiated but
            # never raised, silently ignoring unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, oldrl in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, oldrl in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, oldrl in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            upgrade_op,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
379
377
380
378
381 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
379 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
382 """Determine whether to copy a store file during upgrade.
380 """Determine whether to copy a store file during upgrade.
383
381
384 This function is called when migrating store files from ``srcrepo`` to
382 This function is called when migrating store files from ``srcrepo`` to
385 ``dstrepo`` as part of upgrading a repository.
383 ``dstrepo`` as part of upgrading a repository.
386
384
387 Args:
385 Args:
388 srcrepo: repo we are copying from
386 srcrepo: repo we are copying from
389 dstrepo: repo we are copying to
387 dstrepo: repo we are copying to
390 requirements: set of requirements for ``dstrepo``
388 requirements: set of requirements for ``dstrepo``
391 path: store file being examined
389 path: store file being examined
392 mode: the ``ST_MODE`` file type of ``path``
390 mode: the ``ST_MODE`` file type of ``path``
393 st: ``stat`` data structure for ``path``
391 st: ``stat`` data structure for ``path``
394
392
395 Function should return ``True`` if the file is to be copied.
393 Function should return ``True`` if the file is to be copied.
396 """
394 """
397 # Skip revlogs.
395 # Skip revlogs.
398 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
396 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
399 return False
397 return False
400 # Skip transaction related files.
398 # Skip transaction related files.
401 if path.startswith(b'undo'):
399 if path.startswith(b'undo'):
402 return False
400 return False
403 # Only copy regular files.
401 # Only copy regular files.
404 if mode != stat.S_IFREG:
402 if mode != stat.S_IFREG:
405 return False
403 return False
406 # Skip other skipped files.
404 # Skip other skipped files.
407 if path in (b'lock', b'fncache'):
405 if path in (b'lock', b'fncache'):
408 return False
406 return False
409
407
410 return True
408 return True
411
409
412
410
413 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
411 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
414 """Hook point for extensions to perform additional actions during upgrade.
412 """Hook point for extensions to perform additional actions during upgrade.
415
413
416 This function is called after revlogs and store files have been copied but
414 This function is called after revlogs and store files have been copied but
417 before the new store is swapped into the original location.
415 before the new store is swapped into the original location.
418 """
416 """
419
417
420
418
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the replaced store.
    """
    # Both repos must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            upgrade_op,
        )

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
            if not _filterstorefile(
                srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
            ):
                continue

            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now