##// END OF EJS Templates
actions: store deltareuse mode of whole operation in UpgradeOperation...
Pulkit Goyal -
r46832:82f3ee1a default
parent child Browse files
Show More
@@ -1,887 +1,899 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import (
11 from .. import (
12 error,
12 error,
13 localrepo,
13 localrepo,
14 requirements,
14 requirements,
15 revlog,
15 util,
16 util,
16 )
17 )
17
18
18 from ..utils import compression
19 from ..utils import compression
19
20
20 # list of requirements that request a clone of all revlog if added/removed
21 # list of requirements that request a clone of all revlog if added/removed
21 RECLONES_REQUIREMENTS = {
22 RECLONES_REQUIREMENTS = {
22 b'generaldelta',
23 b'generaldelta',
23 requirements.SPARSEREVLOG_REQUIREMENT,
24 requirements.SPARSEREVLOG_REQUIREMENT,
24 }
25 }
25
26
26
27
27 def preservedrequirements(repo):
28 def preservedrequirements(repo):
28 return set()
29 return set()
29
30
30
31
31 FORMAT_VARIANT = b'deficiency'
32 FORMAT_VARIANT = b'deficiency'
32 OPTIMISATION = b'optimization'
33 OPTIMISATION = b'optimization'
33
34
34
35
35 class improvement(object):
36 class improvement(object):
36 """Represents an improvement that can be made as part of an upgrade.
37 """Represents an improvement that can be made as part of an upgrade.
37
38
38 The following attributes are defined on each instance:
39 The following attributes are defined on each instance:
39
40
40 name
41 name
41 Machine-readable string uniquely identifying this improvement. It
42 Machine-readable string uniquely identifying this improvement. It
42 will be mapped to an action later in the upgrade process.
43 will be mapped to an action later in the upgrade process.
43
44
44 type
45 type
45 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
46 Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
46 A format variant is where we change the storage format. Not all format
47 A format variant is where we change the storage format. Not all format
47 variant changes are an obvious problem.
48 variant changes are an obvious problem.
48 An optimization is an action (sometimes optional) that
49 An optimization is an action (sometimes optional) that
49 can be taken to further improve the state of the repository.
50 can be taken to further improve the state of the repository.
50
51
51 description
52 description
52 Message intended for humans explaining the improvement in more detail,
53 Message intended for humans explaining the improvement in more detail,
53 including the implications of it. For ``FORMAT_VARIANT`` types, should be
54 including the implications of it. For ``FORMAT_VARIANT`` types, should be
54 worded in the present tense. For ``OPTIMISATION`` types, should be
55 worded in the present tense. For ``OPTIMISATION`` types, should be
55 worded in the future tense.
56 worded in the future tense.
56
57
57 upgrademessage
58 upgrademessage
58 Message intended for humans explaining what an upgrade addressing this
59 Message intended for humans explaining what an upgrade addressing this
59 issue will do. Should be worded in the future tense.
60 issue will do. Should be worded in the future tense.
60
61
61 postupgrademessage
62 postupgrademessage
62 Message intended for humans which will be shown post an upgrade
63 Message intended for humans which will be shown post an upgrade
63 operation when the improvement will be added
64 operation when the improvement will be added
64
65
65 postdowngrademessage
66 postdowngrademessage
66 Message intended for humans which will be shown post an upgrade
67 Message intended for humans which will be shown post an upgrade
67 operation in which this improvement was removed
68 operation in which this improvement was removed
68 """
69 """
69
70
70 def __init__(self, name, type, description, upgrademessage):
71 def __init__(self, name, type, description, upgrademessage):
71 self.name = name
72 self.name = name
72 self.type = type
73 self.type = type
73 self.description = description
74 self.description = description
74 self.upgrademessage = upgrademessage
75 self.upgrademessage = upgrademessage
75 self.postupgrademessage = None
76 self.postupgrademessage = None
76 self.postdowngrademessage = None
77 self.postdowngrademessage = None
77
78
78 def __eq__(self, other):
79 def __eq__(self, other):
79 if not isinstance(other, improvement):
80 if not isinstance(other, improvement):
80 # This is what Python tells us to do
81 # This is what Python tells us to do
81 return NotImplemented
82 return NotImplemented
82 return self.name == other.name
83 return self.name == other.name
83
84
84 def __ne__(self, other):
85 def __ne__(self, other):
85 return not (self == other)
86 return not (self == other)
86
87
87 def __hash__(self):
88 def __hash__(self):
88 return hash(self.name)
89 return hash(self.name)
89
90
90
91
91 allformatvariant = []
92 allformatvariant = []
92
93
93
94
94 def registerformatvariant(cls):
95 def registerformatvariant(cls):
95 allformatvariant.append(cls)
96 allformatvariant.append(cls)
96 return cls
97 return cls
97
98
98
99
99 class formatvariant(improvement):
100 class formatvariant(improvement):
100 """an improvement subclass dedicated to repository format"""
101 """an improvement subclass dedicated to repository format"""
101
102
102 type = FORMAT_VARIANT
103 type = FORMAT_VARIANT
103 ### The following attributes should be defined for each class:
104 ### The following attributes should be defined for each class:
104
105
105 # machine-readable string uniquely identifying this improvement. it will be
106 # machine-readable string uniquely identifying this improvement. it will be
106 # mapped to an action later in the upgrade process.
107 # mapped to an action later in the upgrade process.
107 name = None
108 name = None
108
109
109 # message intended for humans explaining the improvement in more detail,
110 # message intended for humans explaining the improvement in more detail,
110 # including the implications of it ``FORMAT_VARIANT`` types, should be
111 # including the implications of it ``FORMAT_VARIANT`` types, should be
111 # worded
112 # worded
112 # in the present tense.
113 # in the present tense.
113 description = None
114 description = None
114
115
115 # message intended for humans explaining what an upgrade addressing this
116 # message intended for humans explaining what an upgrade addressing this
116 # issue will do. should be worded in the future tense.
117 # issue will do. should be worded in the future tense.
117 upgrademessage = None
118 upgrademessage = None
118
119
119 # value of current Mercurial default for new repository
120 # value of current Mercurial default for new repository
120 default = None
121 default = None
121
122
122 # Message intended for humans which will be shown post an upgrade
123 # Message intended for humans which will be shown post an upgrade
123 # operation when the improvement will be added
124 # operation when the improvement will be added
124 postupgrademessage = None
125 postupgrademessage = None
125
126
126 # Message intended for humans which will be shown post an upgrade
127 # Message intended for humans which will be shown post an upgrade
127 # operation in which this improvement was removed
128 # operation in which this improvement was removed
128 postdowngrademessage = None
129 postdowngrademessage = None
129
130
130 def __init__(self):
131 def __init__(self):
131 raise NotImplementedError()
132 raise NotImplementedError()
132
133
133 @staticmethod
134 @staticmethod
134 def fromrepo(repo):
135 def fromrepo(repo):
135 """current value of the variant in the repository"""
136 """current value of the variant in the repository"""
136 raise NotImplementedError()
137 raise NotImplementedError()
137
138
138 @staticmethod
139 @staticmethod
139 def fromconfig(repo):
140 def fromconfig(repo):
140 """current value of the variant in the configuration"""
141 """current value of the variant in the configuration"""
141 raise NotImplementedError()
142 raise NotImplementedError()
142
143
143
144
144 class requirementformatvariant(formatvariant):
145 class requirementformatvariant(formatvariant):
145 """formatvariant based on a 'requirement' name.
146 """formatvariant based on a 'requirement' name.
146
147
147 Many format variants are controlled by a 'requirement'. We define a small
148 Many format variants are controlled by a 'requirement'. We define a small
148 subclass to factor the code.
149 subclass to factor the code.
149 """
150 """
150
151
151 # the requirement that control this format variant
152 # the requirement that control this format variant
152 _requirement = None
153 _requirement = None
153
154
154 @staticmethod
155 @staticmethod
155 def _newreporequirements(ui):
156 def _newreporequirements(ui):
156 return localrepo.newreporequirements(
157 return localrepo.newreporequirements(
157 ui, localrepo.defaultcreateopts(ui)
158 ui, localrepo.defaultcreateopts(ui)
158 )
159 )
159
160
160 @classmethod
161 @classmethod
161 def fromrepo(cls, repo):
162 def fromrepo(cls, repo):
162 assert cls._requirement is not None
163 assert cls._requirement is not None
163 return cls._requirement in repo.requirements
164 return cls._requirement in repo.requirements
164
165
165 @classmethod
166 @classmethod
166 def fromconfig(cls, repo):
167 def fromconfig(cls, repo):
167 assert cls._requirement is not None
168 assert cls._requirement is not None
168 return cls._requirement in cls._newreporequirements(repo.ui)
169 return cls._requirement in cls._newreporequirements(repo.ui)
169
170
170
171
171 @registerformatvariant
172 @registerformatvariant
172 class fncache(requirementformatvariant):
173 class fncache(requirementformatvariant):
173 name = b'fncache'
174 name = b'fncache'
174
175
175 _requirement = b'fncache'
176 _requirement = b'fncache'
176
177
177 default = True
178 default = True
178
179
179 description = _(
180 description = _(
180 b'long and reserved filenames may not work correctly; '
181 b'long and reserved filenames may not work correctly; '
181 b'repository performance is sub-optimal'
182 b'repository performance is sub-optimal'
182 )
183 )
183
184
184 upgrademessage = _(
185 upgrademessage = _(
185 b'repository will be more resilient to storing '
186 b'repository will be more resilient to storing '
186 b'certain paths and performance of certain '
187 b'certain paths and performance of certain '
187 b'operations should be improved'
188 b'operations should be improved'
188 )
189 )
189
190
190
191
191 @registerformatvariant
192 @registerformatvariant
192 class dotencode(requirementformatvariant):
193 class dotencode(requirementformatvariant):
193 name = b'dotencode'
194 name = b'dotencode'
194
195
195 _requirement = b'dotencode'
196 _requirement = b'dotencode'
196
197
197 default = True
198 default = True
198
199
199 description = _(
200 description = _(
200 b'storage of filenames beginning with a period or '
201 b'storage of filenames beginning with a period or '
201 b'space may not work correctly'
202 b'space may not work correctly'
202 )
203 )
203
204
204 upgrademessage = _(
205 upgrademessage = _(
205 b'repository will be better able to store files '
206 b'repository will be better able to store files '
206 b'beginning with a space or period'
207 b'beginning with a space or period'
207 )
208 )
208
209
209
210
210 @registerformatvariant
211 @registerformatvariant
211 class generaldelta(requirementformatvariant):
212 class generaldelta(requirementformatvariant):
212 name = b'generaldelta'
213 name = b'generaldelta'
213
214
214 _requirement = b'generaldelta'
215 _requirement = b'generaldelta'
215
216
216 default = True
217 default = True
217
218
218 description = _(
219 description = _(
219 b'deltas within internal storage are unable to '
220 b'deltas within internal storage are unable to '
220 b'choose optimal revisions; repository is larger and '
221 b'choose optimal revisions; repository is larger and '
221 b'slower than it could be; interaction with other '
222 b'slower than it could be; interaction with other '
222 b'repositories may require extra network and CPU '
223 b'repositories may require extra network and CPU '
223 b'resources, making "hg push" and "hg pull" slower'
224 b'resources, making "hg push" and "hg pull" slower'
224 )
225 )
225
226
226 upgrademessage = _(
227 upgrademessage = _(
227 b'repository storage will be able to create '
228 b'repository storage will be able to create '
228 b'optimal deltas; new repository data will be '
229 b'optimal deltas; new repository data will be '
229 b'smaller and read times should decrease; '
230 b'smaller and read times should decrease; '
230 b'interacting with other repositories using this '
231 b'interacting with other repositories using this '
231 b'storage model should require less network and '
232 b'storage model should require less network and '
232 b'CPU resources, making "hg push" and "hg pull" '
233 b'CPU resources, making "hg push" and "hg pull" '
233 b'faster'
234 b'faster'
234 )
235 )
235
236
236
237
237 @registerformatvariant
238 @registerformatvariant
238 class sharesafe(requirementformatvariant):
239 class sharesafe(requirementformatvariant):
239 name = b'exp-sharesafe'
240 name = b'exp-sharesafe'
240 _requirement = requirements.SHARESAFE_REQUIREMENT
241 _requirement = requirements.SHARESAFE_REQUIREMENT
241
242
242 default = False
243 default = False
243
244
244 description = _(
245 description = _(
245 b'old shared repositories do not share source repository '
246 b'old shared repositories do not share source repository '
246 b'requirements and config. This leads to various problems '
247 b'requirements and config. This leads to various problems '
247 b'when the source repository format is upgraded or some new '
248 b'when the source repository format is upgraded or some new '
248 b'extensions are enabled.'
249 b'extensions are enabled.'
249 )
250 )
250
251
251 upgrademessage = _(
252 upgrademessage = _(
252 b'Upgrades a repository to share-safe format so that future '
253 b'Upgrades a repository to share-safe format so that future '
253 b'shares of this repository share its requirements and configs.'
254 b'shares of this repository share its requirements and configs.'
254 )
255 )
255
256
256 postdowngrademessage = _(
257 postdowngrademessage = _(
257 b'repository downgraded to not use share safe mode, '
258 b'repository downgraded to not use share safe mode, '
258 b'existing shares will not work and needs to'
259 b'existing shares will not work and needs to'
259 b' be reshared.'
260 b' be reshared.'
260 )
261 )
261
262
262 postupgrademessage = _(
263 postupgrademessage = _(
263 b'repository upgraded to share safe mode, existing'
264 b'repository upgraded to share safe mode, existing'
264 b' shares will still work in old non-safe mode. '
265 b' shares will still work in old non-safe mode. '
265 b'Re-share existing shares to use them in safe mode'
266 b'Re-share existing shares to use them in safe mode'
266 b' New shares will be created in safe mode.'
267 b' New shares will be created in safe mode.'
267 )
268 )
268
269
269
270
270 @registerformatvariant
271 @registerformatvariant
271 class sparserevlog(requirementformatvariant):
272 class sparserevlog(requirementformatvariant):
272 name = b'sparserevlog'
273 name = b'sparserevlog'
273
274
274 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
275 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
275
276
276 default = True
277 default = True
277
278
278 description = _(
279 description = _(
279 b'in order to limit disk reading and memory usage on older '
280 b'in order to limit disk reading and memory usage on older '
280 b'version, the span of a delta chain from its root to its '
281 b'version, the span of a delta chain from its root to its '
281 b'end is limited, whatever the relevant data in this span. '
282 b'end is limited, whatever the relevant data in this span. '
282 b'This can severly limit Mercurial ability to build good '
283 b'This can severly limit Mercurial ability to build good '
283 b'chain of delta resulting is much more storage space being '
284 b'chain of delta resulting is much more storage space being '
284 b'taken and limit reusability of on disk delta during '
285 b'taken and limit reusability of on disk delta during '
285 b'exchange.'
286 b'exchange.'
286 )
287 )
287
288
288 upgrademessage = _(
289 upgrademessage = _(
289 b'Revlog supports delta chain with more unused data '
290 b'Revlog supports delta chain with more unused data '
290 b'between payload. These gaps will be skipped at read '
291 b'between payload. These gaps will be skipped at read '
291 b'time. This allows for better delta chains, making a '
292 b'time. This allows for better delta chains, making a '
292 b'better compression and faster exchange with server.'
293 b'better compression and faster exchange with server.'
293 )
294 )
294
295
295
296
296 @registerformatvariant
297 @registerformatvariant
297 class sidedata(requirementformatvariant):
298 class sidedata(requirementformatvariant):
298 name = b'sidedata'
299 name = b'sidedata'
299
300
300 _requirement = requirements.SIDEDATA_REQUIREMENT
301 _requirement = requirements.SIDEDATA_REQUIREMENT
301
302
302 default = False
303 default = False
303
304
304 description = _(
305 description = _(
305 b'Allows storage of extra data alongside a revision, '
306 b'Allows storage of extra data alongside a revision, '
306 b'unlocking various caching options.'
307 b'unlocking various caching options.'
307 )
308 )
308
309
309 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
310 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
310
311
311
312
312 @registerformatvariant
313 @registerformatvariant
313 class persistentnodemap(requirementformatvariant):
314 class persistentnodemap(requirementformatvariant):
314 name = b'persistent-nodemap'
315 name = b'persistent-nodemap'
315
316
316 _requirement = requirements.NODEMAP_REQUIREMENT
317 _requirement = requirements.NODEMAP_REQUIREMENT
317
318
318 default = False
319 default = False
319
320
320 description = _(
321 description = _(
321 b'persist the node -> rev mapping on disk to speedup lookup'
322 b'persist the node -> rev mapping on disk to speedup lookup'
322 )
323 )
323
324
324 upgrademessage = _(b'Speedup revision lookup by node id.')
325 upgrademessage = _(b'Speedup revision lookup by node id.')
325
326
326
327
327 @registerformatvariant
328 @registerformatvariant
328 class copiessdc(requirementformatvariant):
329 class copiessdc(requirementformatvariant):
329 name = b'copies-sdc'
330 name = b'copies-sdc'
330
331
331 _requirement = requirements.COPIESSDC_REQUIREMENT
332 _requirement = requirements.COPIESSDC_REQUIREMENT
332
333
333 default = False
334 default = False
334
335
335 description = _(b'Stores copies information alongside changesets.')
336 description = _(b'Stores copies information alongside changesets.')
336
337
337 upgrademessage = _(
338 upgrademessage = _(
338 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
339 b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
339 )
340 )
340
341
341
342
342 @registerformatvariant
343 @registerformatvariant
343 class removecldeltachain(formatvariant):
344 class removecldeltachain(formatvariant):
344 name = b'plain-cl-delta'
345 name = b'plain-cl-delta'
345
346
346 default = True
347 default = True
347
348
348 description = _(
349 description = _(
349 b'changelog storage is using deltas instead of '
350 b'changelog storage is using deltas instead of '
350 b'raw entries; changelog reading and any '
351 b'raw entries; changelog reading and any '
351 b'operation relying on changelog data are slower '
352 b'operation relying on changelog data are slower '
352 b'than they could be'
353 b'than they could be'
353 )
354 )
354
355
355 upgrademessage = _(
356 upgrademessage = _(
356 b'changelog storage will be reformated to '
357 b'changelog storage will be reformated to '
357 b'store raw entries; changelog reading will be '
358 b'store raw entries; changelog reading will be '
358 b'faster; changelog size may be reduced'
359 b'faster; changelog size may be reduced'
359 )
360 )
360
361
361 @staticmethod
362 @staticmethod
362 def fromrepo(repo):
363 def fromrepo(repo):
363 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
364 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
364 # changelogs with deltas.
365 # changelogs with deltas.
365 cl = repo.changelog
366 cl = repo.changelog
366 chainbase = cl.chainbase
367 chainbase = cl.chainbase
367 return all(rev == chainbase(rev) for rev in cl)
368 return all(rev == chainbase(rev) for rev in cl)
368
369
369 @staticmethod
370 @staticmethod
370 def fromconfig(repo):
371 def fromconfig(repo):
371 return True
372 return True
372
373
373
374
374 @registerformatvariant
375 @registerformatvariant
375 class compressionengine(formatvariant):
376 class compressionengine(formatvariant):
376 name = b'compression'
377 name = b'compression'
377 default = b'zlib'
378 default = b'zlib'
378
379
379 description = _(
380 description = _(
380 b'Compresion algorithm used to compress data. '
381 b'Compresion algorithm used to compress data. '
381 b'Some engine are faster than other'
382 b'Some engine are faster than other'
382 )
383 )
383
384
384 upgrademessage = _(
385 upgrademessage = _(
385 b'revlog content will be recompressed with the new algorithm.'
386 b'revlog content will be recompressed with the new algorithm.'
386 )
387 )
387
388
388 @classmethod
389 @classmethod
389 def fromrepo(cls, repo):
390 def fromrepo(cls, repo):
390 # we allow multiple compression engine requirement to co-exist because
391 # we allow multiple compression engine requirement to co-exist because
391 # strictly speaking, revlog seems to support mixed compression style.
392 # strictly speaking, revlog seems to support mixed compression style.
392 #
393 #
393 # The compression used for new entries will be "the last one"
394 # The compression used for new entries will be "the last one"
394 compression = b'zlib'
395 compression = b'zlib'
395 for req in repo.requirements:
396 for req in repo.requirements:
396 prefix = req.startswith
397 prefix = req.startswith
397 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
398 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
398 compression = req.split(b'-', 2)[2]
399 compression = req.split(b'-', 2)[2]
399 return compression
400 return compression
400
401
401 @classmethod
402 @classmethod
402 def fromconfig(cls, repo):
403 def fromconfig(cls, repo):
403 compengines = repo.ui.configlist(b'format', b'revlog-compression')
404 compengines = repo.ui.configlist(b'format', b'revlog-compression')
404 # return the first valid value as the selection code would do
405 # return the first valid value as the selection code would do
405 for comp in compengines:
406 for comp in compengines:
406 if comp in util.compengines:
407 if comp in util.compengines:
407 return comp
408 return comp
408
409
409 # no valid compression found; let's display it all for clarity
410 # no valid compression found; let's display it all for clarity
410 return b','.join(compengines)
411 return b','.join(compengines)
411
412
412
413
413 @registerformatvariant
414 @registerformatvariant
414 class compressionlevel(formatvariant):
415 class compressionlevel(formatvariant):
415 name = b'compression-level'
416 name = b'compression-level'
416 default = b'default'
417 default = b'default'
417
418
418 description = _(b'compression level')
419 description = _(b'compression level')
419
420
420 upgrademessage = _(b'revlog content will be recompressed')
421 upgrademessage = _(b'revlog content will be recompressed')
421
422
422 @classmethod
423 @classmethod
423 def fromrepo(cls, repo):
424 def fromrepo(cls, repo):
424 comp = compressionengine.fromrepo(repo)
425 comp = compressionengine.fromrepo(repo)
425 level = None
426 level = None
426 if comp == b'zlib':
427 if comp == b'zlib':
427 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
428 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
428 elif comp == b'zstd':
429 elif comp == b'zstd':
429 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
430 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
430 if level is None:
431 if level is None:
431 return b'default'
432 return b'default'
432 return bytes(level)
433 return bytes(level)
433
434
434 @classmethod
435 @classmethod
435 def fromconfig(cls, repo):
436 def fromconfig(cls, repo):
436 comp = compressionengine.fromconfig(repo)
437 comp = compressionengine.fromconfig(repo)
437 level = None
438 level = None
438 if comp == b'zlib':
439 if comp == b'zlib':
439 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
440 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
440 elif comp == b'zstd':
441 elif comp == b'zstd':
441 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
442 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
442 if level is None:
443 if level is None:
443 return b'default'
444 return b'default'
444 return bytes(level)
445 return bytes(level)
445
446
446
447
447 def find_format_upgrades(repo):
448 def find_format_upgrades(repo):
448 """returns a list of format upgrades which can be performed on the repo"""
449 """returns a list of format upgrades which can be performed on the repo"""
449 upgrades = []
450 upgrades = []
450
451
451 # We could detect lack of revlogv1 and store here, but they were added
452 # We could detect lack of revlogv1 and store here, but they were added
452 # in 0.9.2 and we don't support upgrading repos without these
453 # in 0.9.2 and we don't support upgrading repos without these
453 # requirements, so let's not bother.
454 # requirements, so let's not bother.
454
455
455 for fv in allformatvariant:
456 for fv in allformatvariant:
456 if not fv.fromrepo(repo):
457 if not fv.fromrepo(repo):
457 upgrades.append(fv)
458 upgrades.append(fv)
458
459
459 return upgrades
460 return upgrades
460
461
461
462
462 def find_format_downgrades(repo):
463 def find_format_downgrades(repo):
463 """returns a list of format downgrades which will be performed on the repo
464 """returns a list of format downgrades which will be performed on the repo
464 because of disabled config option for them"""
465 because of disabled config option for them"""
465
466
466 downgrades = []
467 downgrades = []
467
468
468 for fv in allformatvariant:
469 for fv in allformatvariant:
469 # format variant exists in repo but does not exist in new repository
470 # format variant exists in repo but does not exist in new repository
470 # config
471 # config
471 if fv.fromrepo(repo) and not fv.fromconfig(repo):
472 if fv.fromrepo(repo) and not fv.fromconfig(repo):
472 downgrades.append(fv)
473 downgrades.append(fv)
473
474
474 return downgrades
475 return downgrades
475
476
476
477
477 ALL_OPTIMISATIONS = []
478 ALL_OPTIMISATIONS = []
478
479
479
480
480 def register_optimization(obj):
481 def register_optimization(obj):
481 ALL_OPTIMISATIONS.append(obj)
482 ALL_OPTIMISATIONS.append(obj)
482 return obj
483 return obj
483
484
484
485
485 register_optimization(
486 register_optimization(
486 improvement(
487 improvement(
487 name=b're-delta-parent',
488 name=b're-delta-parent',
488 type=OPTIMISATION,
489 type=OPTIMISATION,
489 description=_(
490 description=_(
490 b'deltas within internal storage will be recalculated to '
491 b'deltas within internal storage will be recalculated to '
491 b'choose an optimal base revision where this was not '
492 b'choose an optimal base revision where this was not '
492 b'already done; the size of the repository may shrink and '
493 b'already done; the size of the repository may shrink and '
493 b'various operations may become faster; the first time '
494 b'various operations may become faster; the first time '
494 b'this optimization is performed could slow down upgrade '
495 b'this optimization is performed could slow down upgrade '
495 b'execution considerably; subsequent invocations should '
496 b'execution considerably; subsequent invocations should '
496 b'not run noticeably slower'
497 b'not run noticeably slower'
497 ),
498 ),
498 upgrademessage=_(
499 upgrademessage=_(
499 b'deltas within internal storage will choose a new '
500 b'deltas within internal storage will choose a new '
500 b'base revision if needed'
501 b'base revision if needed'
501 ),
502 ),
502 )
503 )
503 )
504 )
504
505
505 register_optimization(
506 register_optimization(
506 improvement(
507 improvement(
507 name=b're-delta-multibase',
508 name=b're-delta-multibase',
508 type=OPTIMISATION,
509 type=OPTIMISATION,
509 description=_(
510 description=_(
510 b'deltas within internal storage will be recalculated '
511 b'deltas within internal storage will be recalculated '
511 b'against multiple base revision and the smallest '
512 b'against multiple base revision and the smallest '
512 b'difference will be used; the size of the repository may '
513 b'difference will be used; the size of the repository may '
513 b'shrink significantly when there are many merges; this '
514 b'shrink significantly when there are many merges; this '
514 b'optimization will slow down execution in proportion to '
515 b'optimization will slow down execution in proportion to '
515 b'the number of merges in the repository and the amount '
516 b'the number of merges in the repository and the amount '
516 b'of files in the repository; this slow down should not '
517 b'of files in the repository; this slow down should not '
517 b'be significant unless there are tens of thousands of '
518 b'be significant unless there are tens of thousands of '
518 b'files and thousands of merges'
519 b'files and thousands of merges'
519 ),
520 ),
520 upgrademessage=_(
521 upgrademessage=_(
521 b'deltas within internal storage will choose an '
522 b'deltas within internal storage will choose an '
522 b'optimal delta by computing deltas against multiple '
523 b'optimal delta by computing deltas against multiple '
523 b'parents; may slow down execution time '
524 b'parents; may slow down execution time '
524 b'significantly'
525 b'significantly'
525 ),
526 ),
526 )
527 )
527 )
528 )
528
529
529 register_optimization(
530 register_optimization(
530 improvement(
531 improvement(
531 name=b're-delta-all',
532 name=b're-delta-all',
532 type=OPTIMISATION,
533 type=OPTIMISATION,
533 description=_(
534 description=_(
534 b'deltas within internal storage will always be '
535 b'deltas within internal storage will always be '
535 b'recalculated without reusing prior deltas; this will '
536 b'recalculated without reusing prior deltas; this will '
536 b'likely make execution run several times slower; this '
537 b'likely make execution run several times slower; this '
537 b'optimization is typically not needed'
538 b'optimization is typically not needed'
538 ),
539 ),
539 upgrademessage=_(
540 upgrademessage=_(
540 b'deltas within internal storage will be fully '
541 b'deltas within internal storage will be fully '
541 b'recomputed; this will likely drastically slow down '
542 b'recomputed; this will likely drastically slow down '
542 b'execution time'
543 b'execution time'
543 ),
544 ),
544 )
545 )
545 )
546 )
546
547
547 register_optimization(
548 register_optimization(
548 improvement(
549 improvement(
549 name=b're-delta-fulladd',
550 name=b're-delta-fulladd',
550 type=OPTIMISATION,
551 type=OPTIMISATION,
551 description=_(
552 description=_(
552 b'every revision will be re-added as if it was new '
553 b'every revision will be re-added as if it was new '
553 b'content. It will go through the full storage '
554 b'content. It will go through the full storage '
554 b'mechanism giving extensions a chance to process it '
555 b'mechanism giving extensions a chance to process it '
555 b'(eg. lfs). This is similar to "re-delta-all" but even '
556 b'(eg. lfs). This is similar to "re-delta-all" but even '
556 b'slower since more logic is involved.'
557 b'slower since more logic is involved.'
557 ),
558 ),
558 upgrademessage=_(
559 upgrademessage=_(
559 b'each revision will be added as new content to the '
560 b'each revision will be added as new content to the '
560 b'internal storage; this will likely drastically slow '
561 b'internal storage; this will likely drastically slow '
561 b'down execution time, but some extensions might need '
562 b'down execution time, but some extensions might need '
562 b'it'
563 b'it'
563 ),
564 ),
564 )
565 )
565 )
566 )
566
567
567
568
568 def findoptimizations(repo):
569 def findoptimizations(repo):
569 """Determine optimisation that could be used during upgrade"""
570 """Determine optimisation that could be used during upgrade"""
570 # These are unconditionally added. There is logic later that figures out
571 # These are unconditionally added. There is logic later that figures out
571 # which ones to apply.
572 # which ones to apply.
572 return list(ALL_OPTIMISATIONS)
573 return list(ALL_OPTIMISATIONS)
573
574
574
575
575 def determine_upgrade_actions(
576 def determine_upgrade_actions(
576 repo, format_upgrades, optimizations, sourcereqs, destreqs
577 repo, format_upgrades, optimizations, sourcereqs, destreqs
577 ):
578 ):
578 """Determine upgrade actions that will be performed.
579 """Determine upgrade actions that will be performed.
579
580
580 Given a list of improvements as returned by ``find_format_upgrades`` and
581 Given a list of improvements as returned by ``find_format_upgrades`` and
581 ``findoptimizations``, determine the list of upgrade actions that
582 ``findoptimizations``, determine the list of upgrade actions that
582 will be performed.
583 will be performed.
583
584
584 The role of this function is to filter improvements if needed, apply
585 The role of this function is to filter improvements if needed, apply
585 recommended optimizations from the improvements list that make sense,
586 recommended optimizations from the improvements list that make sense,
586 etc.
587 etc.
587
588
588 Returns a list of action names.
589 Returns a list of action names.
589 """
590 """
590 newactions = []
591 newactions = []
591
592
592 for d in format_upgrades:
593 for d in format_upgrades:
593 name = d._requirement
594 name = d._requirement
594
595
595 # If the action is a requirement that doesn't show up in the
596 # If the action is a requirement that doesn't show up in the
596 # destination requirements, prune the action.
597 # destination requirements, prune the action.
597 if name is not None and name not in destreqs:
598 if name is not None and name not in destreqs:
598 continue
599 continue
599
600
600 newactions.append(d)
601 newactions.append(d)
601
602
602 newactions.extend(o for o in sorted(optimizations) if o not in newactions)
603 newactions.extend(o for o in sorted(optimizations) if o not in newactions)
603
604
604 # FUTURE consider adding some optimizations here for certain transitions.
605 # FUTURE consider adding some optimizations here for certain transitions.
605 # e.g. adding generaldelta could schedule parent redeltas.
606 # e.g. adding generaldelta could schedule parent redeltas.
606
607
607 return newactions
608 return newactions
608
609
609
610
610 class UpgradeOperation(object):
611 class UpgradeOperation(object):
611 """represent the work to be done during an upgrade"""
612 """represent the work to be done during an upgrade"""
612
613
613 def __init__(
614 def __init__(
614 self,
615 self,
615 ui,
616 ui,
616 new_requirements,
617 new_requirements,
617 current_requirements,
618 current_requirements,
618 upgrade_actions,
619 upgrade_actions,
619 removed_actions,
620 removed_actions,
620 revlogs_to_process,
621 revlogs_to_process,
621 ):
622 ):
622 self.ui = ui
623 self.ui = ui
623 self.new_requirements = new_requirements
624 self.new_requirements = new_requirements
624 self.current_requirements = current_requirements
625 self.current_requirements = current_requirements
625 # list of upgrade actions the operation will perform
626 # list of upgrade actions the operation will perform
626 self.upgrade_actions = upgrade_actions
627 self.upgrade_actions = upgrade_actions
627 self._upgrade_actions_names = set([a.name for a in upgrade_actions])
628 self._upgrade_actions_names = set([a.name for a in upgrade_actions])
628 self.removed_actions = removed_actions
629 self.removed_actions = removed_actions
629 self.revlogs_to_process = revlogs_to_process
630 self.revlogs_to_process = revlogs_to_process
630 # requirements which will be added by the operation
631 # requirements which will be added by the operation
631 self._added_requirements = (
632 self._added_requirements = (
632 self.new_requirements - self.current_requirements
633 self.new_requirements - self.current_requirements
633 )
634 )
634 # requirements which will be removed by the operation
635 # requirements which will be removed by the operation
635 self._removed_requirements = (
636 self._removed_requirements = (
636 self.current_requirements - self.new_requirements
637 self.current_requirements - self.new_requirements
637 )
638 )
638 # requirements which will be preserved by the operation
639 # requirements which will be preserved by the operation
639 self._preserved_requirements = (
640 self._preserved_requirements = (
640 self.current_requirements & self.new_requirements
641 self.current_requirements & self.new_requirements
641 )
642 )
642 # optimizations which are not used and it's recommended that they
643 # optimizations which are not used and it's recommended that they
643 # should use them
644 # should use them
644 all_optimizations = findoptimizations(None)
645 all_optimizations = findoptimizations(None)
645 self.unused_optimizations = [
646 self.unused_optimizations = [
646 i for i in all_optimizations if i not in self.upgrade_actions
647 i for i in all_optimizations if i not in self.upgrade_actions
647 ]
648 ]
648
649
650 # delta reuse mode of this upgrade operation
651 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
652 if b're-delta-all' in self._upgrade_actions_names:
653 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
654 elif b're-delta-parent' in self._upgrade_actions_names:
655 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
656 elif b're-delta-multibase' in self._upgrade_actions_names:
657 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
658 elif b're-delta-fulladd' in self._upgrade_actions_names:
659 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
660
649 def _write_labeled(self, l, label):
661 def _write_labeled(self, l, label):
650 """
662 """
651 Utility function to aid writing of a list under one label
663 Utility function to aid writing of a list under one label
652 """
664 """
653 first = True
665 first = True
654 for r in sorted(l):
666 for r in sorted(l):
655 if not first:
667 if not first:
656 self.ui.write(b', ')
668 self.ui.write(b', ')
657 self.ui.write(r, label=label)
669 self.ui.write(r, label=label)
658 first = False
670 first = False
659
671
660 def print_requirements(self):
672 def print_requirements(self):
661 self.ui.write(_(b'requirements\n'))
673 self.ui.write(_(b'requirements\n'))
662 self.ui.write(_(b' preserved: '))
674 self.ui.write(_(b' preserved: '))
663 self._write_labeled(
675 self._write_labeled(
664 self._preserved_requirements, "upgrade-repo.requirement.preserved"
676 self._preserved_requirements, "upgrade-repo.requirement.preserved"
665 )
677 )
666 self.ui.write((b'\n'))
678 self.ui.write((b'\n'))
667 if self._removed_requirements:
679 if self._removed_requirements:
668 self.ui.write(_(b' removed: '))
680 self.ui.write(_(b' removed: '))
669 self._write_labeled(
681 self._write_labeled(
670 self._removed_requirements, "upgrade-repo.requirement.removed"
682 self._removed_requirements, "upgrade-repo.requirement.removed"
671 )
683 )
672 self.ui.write((b'\n'))
684 self.ui.write((b'\n'))
673 if self._added_requirements:
685 if self._added_requirements:
674 self.ui.write(_(b' added: '))
686 self.ui.write(_(b' added: '))
675 self._write_labeled(
687 self._write_labeled(
676 self._added_requirements, "upgrade-repo.requirement.added"
688 self._added_requirements, "upgrade-repo.requirement.added"
677 )
689 )
678 self.ui.write((b'\n'))
690 self.ui.write((b'\n'))
679 self.ui.write(b'\n')
691 self.ui.write(b'\n')
680
692
681 def print_optimisations(self):
693 def print_optimisations(self):
682 optimisations = [
694 optimisations = [
683 a for a in self.upgrade_actions if a.type == OPTIMISATION
695 a for a in self.upgrade_actions if a.type == OPTIMISATION
684 ]
696 ]
685 optimisations.sort(key=lambda a: a.name)
697 optimisations.sort(key=lambda a: a.name)
686 if optimisations:
698 if optimisations:
687 self.ui.write(_(b'optimisations: '))
699 self.ui.write(_(b'optimisations: '))
688 self._write_labeled(
700 self._write_labeled(
689 [a.name for a in optimisations],
701 [a.name for a in optimisations],
690 "upgrade-repo.optimisation.performed",
702 "upgrade-repo.optimisation.performed",
691 )
703 )
692 self.ui.write(b'\n\n')
704 self.ui.write(b'\n\n')
693
705
694 def print_upgrade_actions(self):
706 def print_upgrade_actions(self):
695 for a in self.upgrade_actions:
707 for a in self.upgrade_actions:
696 self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
708 self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
697
709
698 def print_affected_revlogs(self):
710 def print_affected_revlogs(self):
699 if not self.revlogs_to_process:
711 if not self.revlogs_to_process:
700 self.ui.write((b'no revlogs to process\n'))
712 self.ui.write((b'no revlogs to process\n'))
701 else:
713 else:
702 self.ui.write((b'processed revlogs:\n'))
714 self.ui.write((b'processed revlogs:\n'))
703 for r in sorted(self.revlogs_to_process):
715 for r in sorted(self.revlogs_to_process):
704 self.ui.write((b' - %s\n' % r))
716 self.ui.write((b' - %s\n' % r))
705 self.ui.write((b'\n'))
717 self.ui.write((b'\n'))
706
718
707 def print_unused_optimizations(self):
719 def print_unused_optimizations(self):
708 for i in self.unused_optimizations:
720 for i in self.unused_optimizations:
709 self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
721 self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
710
722
711 def has_upgrade_action(self, name):
723 def has_upgrade_action(self, name):
712 """ Check whether the upgrade operation will perform this action """
724 """ Check whether the upgrade operation will perform this action """
713 return name in self._upgrade_actions_names
725 return name in self._upgrade_actions_names
714
726
715 def print_post_op_messages(self):
727 def print_post_op_messages(self):
716 """ print post upgrade operation warning messages """
728 """ print post upgrade operation warning messages """
717 for a in self.upgrade_actions:
729 for a in self.upgrade_actions:
718 if a.postupgrademessage is not None:
730 if a.postupgrademessage is not None:
719 self.ui.warn(b'%s\n' % a.postupgrademessage)
731 self.ui.warn(b'%s\n' % a.postupgrademessage)
720 for a in self.removed_actions:
732 for a in self.removed_actions:
721 if a.postdowngrademessage is not None:
733 if a.postdowngrademessage is not None:
722 self.ui.warn(b'%s\n' % a.postdowngrademessage)
734 self.ui.warn(b'%s\n' % a.postdowngrademessage)
723
735
724
736
725 ### Code checking if a repository can got through the upgrade process at all. #
737 ### Code checking if a repository can got through the upgrade process at all. #
726
738
727
739
728 def requiredsourcerequirements(repo):
740 def requiredsourcerequirements(repo):
729 """Obtain requirements required to be present to upgrade a repo.
741 """Obtain requirements required to be present to upgrade a repo.
730
742
731 An upgrade will not be allowed if the repository doesn't have the
743 An upgrade will not be allowed if the repository doesn't have the
732 requirements returned by this function.
744 requirements returned by this function.
733 """
745 """
734 return {
746 return {
735 # Introduced in Mercurial 0.9.2.
747 # Introduced in Mercurial 0.9.2.
736 b'revlogv1',
748 b'revlogv1',
737 # Introduced in Mercurial 0.9.2.
749 # Introduced in Mercurial 0.9.2.
738 b'store',
750 b'store',
739 }
751 }
740
752
741
753
742 def blocksourcerequirements(repo):
754 def blocksourcerequirements(repo):
743 """Obtain requirements that will prevent an upgrade from occurring.
755 """Obtain requirements that will prevent an upgrade from occurring.
744
756
745 An upgrade cannot be performed if the source repository contains a
757 An upgrade cannot be performed if the source repository contains a
746 requirements in the returned set.
758 requirements in the returned set.
747 """
759 """
748 return {
760 return {
749 # The upgrade code does not yet support these experimental features.
761 # The upgrade code does not yet support these experimental features.
750 # This is an artificial limitation.
762 # This is an artificial limitation.
751 requirements.TREEMANIFEST_REQUIREMENT,
763 requirements.TREEMANIFEST_REQUIREMENT,
752 # This was a precursor to generaldelta and was never enabled by default.
764 # This was a precursor to generaldelta and was never enabled by default.
753 # It should (hopefully) not exist in the wild.
765 # It should (hopefully) not exist in the wild.
754 b'parentdelta',
766 b'parentdelta',
755 # Upgrade should operate on the actual store, not the shared link.
767 # Upgrade should operate on the actual store, not the shared link.
756 requirements.SHARED_REQUIREMENT,
768 requirements.SHARED_REQUIREMENT,
757 }
769 }
758
770
759
771
760 def check_source_requirements(repo):
772 def check_source_requirements(repo):
761 """Ensure that no existing requirements prevent the repository upgrade"""
773 """Ensure that no existing requirements prevent the repository upgrade"""
762
774
763 required = requiredsourcerequirements(repo)
775 required = requiredsourcerequirements(repo)
764 missingreqs = required - repo.requirements
776 missingreqs = required - repo.requirements
765 if missingreqs:
777 if missingreqs:
766 msg = _(b'cannot upgrade repository; requirement missing: %s')
778 msg = _(b'cannot upgrade repository; requirement missing: %s')
767 missingreqs = b', '.join(sorted(missingreqs))
779 missingreqs = b', '.join(sorted(missingreqs))
768 raise error.Abort(msg % missingreqs)
780 raise error.Abort(msg % missingreqs)
769
781
770 blocking = blocksourcerequirements(repo)
782 blocking = blocksourcerequirements(repo)
771 blockingreqs = blocking & repo.requirements
783 blockingreqs = blocking & repo.requirements
772 if blockingreqs:
784 if blockingreqs:
773 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
785 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
774 blockingreqs = b', '.join(sorted(blockingreqs))
786 blockingreqs = b', '.join(sorted(blockingreqs))
775 raise error.Abort(m % blockingreqs)
787 raise error.Abort(m % blockingreqs)
776
788
777
789
778 ### Verify the validity of the planned requirement changes ####################
790 ### Verify the validity of the planned requirement changes ####################
779
791
780
792
781 def supportremovedrequirements(repo):
793 def supportremovedrequirements(repo):
782 """Obtain requirements that can be removed during an upgrade.
794 """Obtain requirements that can be removed during an upgrade.
783
795
784 If an upgrade were to create a repository that dropped a requirement,
796 If an upgrade were to create a repository that dropped a requirement,
785 the dropped requirement must appear in the returned set for the upgrade
797 the dropped requirement must appear in the returned set for the upgrade
786 to be allowed.
798 to be allowed.
787 """
799 """
788 supported = {
800 supported = {
789 requirements.SPARSEREVLOG_REQUIREMENT,
801 requirements.SPARSEREVLOG_REQUIREMENT,
790 requirements.SIDEDATA_REQUIREMENT,
802 requirements.SIDEDATA_REQUIREMENT,
791 requirements.COPIESSDC_REQUIREMENT,
803 requirements.COPIESSDC_REQUIREMENT,
792 requirements.NODEMAP_REQUIREMENT,
804 requirements.NODEMAP_REQUIREMENT,
793 requirements.SHARESAFE_REQUIREMENT,
805 requirements.SHARESAFE_REQUIREMENT,
794 }
806 }
795 for name in compression.compengines:
807 for name in compression.compengines:
796 engine = compression.compengines[name]
808 engine = compression.compengines[name]
797 if engine.available() and engine.revlogheader():
809 if engine.available() and engine.revlogheader():
798 supported.add(b'exp-compression-%s' % name)
810 supported.add(b'exp-compression-%s' % name)
799 if engine.name() == b'zstd':
811 if engine.name() == b'zstd':
800 supported.add(b'revlog-compression-zstd')
812 supported.add(b'revlog-compression-zstd')
801 return supported
813 return supported
802
814
803
815
804 def supporteddestrequirements(repo):
816 def supporteddestrequirements(repo):
805 """Obtain requirements that upgrade supports in the destination.
817 """Obtain requirements that upgrade supports in the destination.
806
818
807 If the result of the upgrade would create requirements not in this set,
819 If the result of the upgrade would create requirements not in this set,
808 the upgrade is disallowed.
820 the upgrade is disallowed.
809
821
810 Extensions should monkeypatch this to add their custom requirements.
822 Extensions should monkeypatch this to add their custom requirements.
811 """
823 """
812 supported = {
824 supported = {
813 b'dotencode',
825 b'dotencode',
814 b'fncache',
826 b'fncache',
815 b'generaldelta',
827 b'generaldelta',
816 b'revlogv1',
828 b'revlogv1',
817 b'store',
829 b'store',
818 requirements.SPARSEREVLOG_REQUIREMENT,
830 requirements.SPARSEREVLOG_REQUIREMENT,
819 requirements.SIDEDATA_REQUIREMENT,
831 requirements.SIDEDATA_REQUIREMENT,
820 requirements.COPIESSDC_REQUIREMENT,
832 requirements.COPIESSDC_REQUIREMENT,
821 requirements.NODEMAP_REQUIREMENT,
833 requirements.NODEMAP_REQUIREMENT,
822 requirements.SHARESAFE_REQUIREMENT,
834 requirements.SHARESAFE_REQUIREMENT,
823 }
835 }
824 for name in compression.compengines:
836 for name in compression.compengines:
825 engine = compression.compengines[name]
837 engine = compression.compengines[name]
826 if engine.available() and engine.revlogheader():
838 if engine.available() and engine.revlogheader():
827 supported.add(b'exp-compression-%s' % name)
839 supported.add(b'exp-compression-%s' % name)
828 if engine.name() == b'zstd':
840 if engine.name() == b'zstd':
829 supported.add(b'revlog-compression-zstd')
841 supported.add(b'revlog-compression-zstd')
830 return supported
842 return supported
831
843
832
844
833 def allowednewrequirements(repo):
845 def allowednewrequirements(repo):
834 """Obtain requirements that can be added to a repository during upgrade.
846 """Obtain requirements that can be added to a repository during upgrade.
835
847
836 This is used to disallow proposed requirements from being added when
848 This is used to disallow proposed requirements from being added when
837 they weren't present before.
849 they weren't present before.
838
850
839 We use a list of allowed requirement additions instead of a list of known
851 We use a list of allowed requirement additions instead of a list of known
840 bad additions because the whitelist approach is safer and will prevent
852 bad additions because the whitelist approach is safer and will prevent
841 future, unknown requirements from accidentally being added.
853 future, unknown requirements from accidentally being added.
842 """
854 """
843 supported = {
855 supported = {
844 b'dotencode',
856 b'dotencode',
845 b'fncache',
857 b'fncache',
846 b'generaldelta',
858 b'generaldelta',
847 requirements.SPARSEREVLOG_REQUIREMENT,
859 requirements.SPARSEREVLOG_REQUIREMENT,
848 requirements.SIDEDATA_REQUIREMENT,
860 requirements.SIDEDATA_REQUIREMENT,
849 requirements.COPIESSDC_REQUIREMENT,
861 requirements.COPIESSDC_REQUIREMENT,
850 requirements.NODEMAP_REQUIREMENT,
862 requirements.NODEMAP_REQUIREMENT,
851 requirements.SHARESAFE_REQUIREMENT,
863 requirements.SHARESAFE_REQUIREMENT,
852 }
864 }
853 for name in compression.compengines:
865 for name in compression.compengines:
854 engine = compression.compengines[name]
866 engine = compression.compengines[name]
855 if engine.available() and engine.revlogheader():
867 if engine.available() and engine.revlogheader():
856 supported.add(b'exp-compression-%s' % name)
868 supported.add(b'exp-compression-%s' % name)
857 if engine.name() == b'zstd':
869 if engine.name() == b'zstd':
858 supported.add(b'revlog-compression-zstd')
870 supported.add(b'revlog-compression-zstd')
859 return supported
871 return supported
860
872
861
873
862 def check_requirements_changes(repo, new_reqs):
874 def check_requirements_changes(repo, new_reqs):
863 old_reqs = repo.requirements
875 old_reqs = repo.requirements
864
876
865 support_removal = supportremovedrequirements(repo)
877 support_removal = supportremovedrequirements(repo)
866 no_remove_reqs = old_reqs - new_reqs - support_removal
878 no_remove_reqs = old_reqs - new_reqs - support_removal
867 if no_remove_reqs:
879 if no_remove_reqs:
868 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
880 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
869 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
881 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
870 raise error.Abort(msg % no_remove_reqs)
882 raise error.Abort(msg % no_remove_reqs)
871
883
872 support_addition = allowednewrequirements(repo)
884 support_addition = allowednewrequirements(repo)
873 no_add_reqs = new_reqs - old_reqs - support_addition
885 no_add_reqs = new_reqs - old_reqs - support_addition
874 if no_add_reqs:
886 if no_add_reqs:
875 m = _(b'cannot upgrade repository; do not support adding requirement: ')
887 m = _(b'cannot upgrade repository; do not support adding requirement: ')
876 no_add_reqs = b', '.join(sorted(no_add_reqs))
888 no_add_reqs = b', '.join(sorted(no_add_reqs))
877 raise error.Abort(m + no_add_reqs)
889 raise error.Abort(m + no_add_reqs)
878
890
879 supported = supporteddestrequirements(repo)
891 supported = supporteddestrequirements(repo)
880 unsupported_reqs = new_reqs - supported
892 unsupported_reqs = new_reqs - supported
881 if unsupported_reqs:
893 if unsupported_reqs:
882 msg = _(
894 msg = _(
883 b'cannot upgrade repository; do not support destination '
895 b'cannot upgrade repository; do not support destination '
884 b'requirement: %s'
896 b'requirement: %s'
885 )
897 )
886 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
898 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
887 raise error.Abort(msg % unsupported_reqs)
899 raise error.Abort(msg % unsupported_reqs)
@@ -1,543 +1,532 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from ..i18n import _
12 from ..i18n import _
13 from ..pycompat import getattr
13 from ..pycompat import getattr
14 from .. import (
14 from .. import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 manifest,
18 manifest,
19 metadata,
19 metadata,
20 pycompat,
20 pycompat,
21 requirements,
21 requirements,
22 revlog,
22 revlog,
23 scmutil,
23 scmutil,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 )
26 )
27
27
28
28
29 def _revlogfrompath(repo, path):
29 def _revlogfrompath(repo, path):
30 """Obtain a revlog from a repo path.
30 """Obtain a revlog from a repo path.
31
31
32 An instance of the appropriate class is returned.
32 An instance of the appropriate class is returned.
33 """
33 """
34 if path == b'00changelog.i':
34 if path == b'00changelog.i':
35 return changelog.changelog(repo.svfs)
35 return changelog.changelog(repo.svfs)
36 elif path.endswith(b'00manifest.i'):
36 elif path.endswith(b'00manifest.i'):
37 mandir = path[: -len(b'00manifest.i')]
37 mandir = path[: -len(b'00manifest.i')]
38 return manifest.manifestrevlog(repo.svfs, tree=mandir)
38 return manifest.manifestrevlog(repo.svfs, tree=mandir)
39 else:
39 else:
40 # reverse of "/".join(("data", path + ".i"))
40 # reverse of "/".join(("data", path + ".i"))
41 return filelog.filelog(repo.svfs, path[5:-2])
41 return filelog.filelog(repo.svfs, path[5:-2])
42
42
43
43
44 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
44 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
45 """copy all relevant files for `oldrl` into `destrepo` store
45 """copy all relevant files for `oldrl` into `destrepo` store
46
46
47 Files are copied "as is" without any transformation. The copy is performed
47 Files are copied "as is" without any transformation. The copy is performed
48 without extra checks. Callers are responsible for making sure the copied
48 without extra checks. Callers are responsible for making sure the copied
49 content is compatible with format of the destination repository.
49 content is compatible with format of the destination repository.
50 """
50 """
51 oldrl = getattr(oldrl, '_revlog', oldrl)
51 oldrl = getattr(oldrl, '_revlog', oldrl)
52 newrl = _revlogfrompath(destrepo, unencodedname)
52 newrl = _revlogfrompath(destrepo, unencodedname)
53 newrl = getattr(newrl, '_revlog', newrl)
53 newrl = getattr(newrl, '_revlog', newrl)
54
54
55 oldvfs = oldrl.opener
55 oldvfs = oldrl.opener
56 newvfs = newrl.opener
56 newvfs = newrl.opener
57 oldindex = oldvfs.join(oldrl.indexfile)
57 oldindex = oldvfs.join(oldrl.indexfile)
58 newindex = newvfs.join(newrl.indexfile)
58 newindex = newvfs.join(newrl.indexfile)
59 olddata = oldvfs.join(oldrl.datafile)
59 olddata = oldvfs.join(oldrl.datafile)
60 newdata = newvfs.join(newrl.datafile)
60 newdata = newvfs.join(newrl.datafile)
61
61
62 with newvfs(newrl.indexfile, b'w'):
62 with newvfs(newrl.indexfile, b'w'):
63 pass # create all the directories
63 pass # create all the directories
64
64
65 util.copyfile(oldindex, newindex)
65 util.copyfile(oldindex, newindex)
66 copydata = oldrl.opener.exists(oldrl.datafile)
66 copydata = oldrl.opener.exists(oldrl.datafile)
67 if copydata:
67 if copydata:
68 util.copyfile(olddata, newdata)
68 util.copyfile(olddata, newdata)
69
69
70 if not (
70 if not (
71 unencodedname.endswith(b'00changelog.i')
71 unencodedname.endswith(b'00changelog.i')
72 or unencodedname.endswith(b'00manifest.i')
72 or unencodedname.endswith(b'00manifest.i')
73 ):
73 ):
74 destrepo.svfs.fncache.add(unencodedname)
74 destrepo.svfs.fncache.add(unencodedname)
75 if copydata:
75 if copydata:
76 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
76 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77
77
78
78
79 UPGRADE_CHANGELOG = b"changelog"
79 UPGRADE_CHANGELOG = b"changelog"
80 UPGRADE_MANIFEST = b"manifest"
80 UPGRADE_MANIFEST = b"manifest"
81 UPGRADE_FILELOGS = b"all-filelogs"
81 UPGRADE_FILELOGS = b"all-filelogs"
82
82
83 UPGRADE_ALL_REVLOGS = frozenset(
83 UPGRADE_ALL_REVLOGS = frozenset(
84 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
84 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
85 )
85 )
86
86
87
87
88 def getsidedatacompanion(srcrepo, dstrepo):
88 def getsidedatacompanion(srcrepo, dstrepo):
89 sidedatacompanion = None
89 sidedatacompanion = None
90 removedreqs = srcrepo.requirements - dstrepo.requirements
90 removedreqs = srcrepo.requirements - dstrepo.requirements
91 addedreqs = dstrepo.requirements - srcrepo.requirements
91 addedreqs = dstrepo.requirements - srcrepo.requirements
92 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
92 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
93
93
94 def sidedatacompanion(rl, rev):
94 def sidedatacompanion(rl, rev):
95 rl = getattr(rl, '_revlog', rl)
95 rl = getattr(rl, '_revlog', rl)
96 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
96 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
97 return True, (), {}, 0, 0
97 return True, (), {}, 0, 0
98 return False, (), {}, 0, 0
98 return False, (), {}, 0, 0
99
99
100 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
100 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
101 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
101 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
102 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
102 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
103 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
103 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
104 return sidedatacompanion
104 return sidedatacompanion
105
105
106
106
107 def matchrevlog(revlogfilter, entry):
107 def matchrevlog(revlogfilter, entry):
108 """check if a revlog is selected for cloning.
108 """check if a revlog is selected for cloning.
109
109
110 In other words, are there any updates which need to be done on revlog
110 In other words, are there any updates which need to be done on revlog
111 or it can be blindly copied.
111 or it can be blindly copied.
112
112
113 The store entry is checked against the passed filter"""
113 The store entry is checked against the passed filter"""
114 if entry.endswith(b'00changelog.i'):
114 if entry.endswith(b'00changelog.i'):
115 return UPGRADE_CHANGELOG in revlogfilter
115 return UPGRADE_CHANGELOG in revlogfilter
116 elif entry.endswith(b'00manifest.i'):
116 elif entry.endswith(b'00manifest.i'):
117 return UPGRADE_MANIFEST in revlogfilter
117 return UPGRADE_MANIFEST in revlogfilter
118 return UPGRADE_FILELOGS in revlogfilter
118 return UPGRADE_FILELOGS in revlogfilter
119
119
120
120
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    deltareuse,
    forcedeltabothparents,
    revlogs,
    sidedatacompanion,
    oncopiedrevision,
):
    """Migrate one revlog into ``dstrepo`` and return the new revlog object.

    When ``unencoded`` is selected by the ``revlogs`` filter, every revision
    of ``old_revlog`` is cloned (re-encoded) into a freshly opened revlog in
    ``dstrepo``; otherwise the on-disk files are copied verbatim.
    """
    if not matchrevlog(revlogs, unencoded):
        # Not selected for re-encoding: copy the raw store files as-is and
        # open the result so the caller can inspect it.
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)
        return _revlogfrompath(dstrepo, unencoded)

    ui.note(
        _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
    )
    cloned = _revlogfrompath(dstrepo, unencoded)
    old_revlog.clone(
        tr,
        cloned,
        addrevisioncb=oncopiedrevision,
        deltareuse=deltareuse,
        forcedeltabothparents=forcedeltabothparents,
        sidedatacompanion=sidedatacompanion,
    )
    return cloned
155
155
156
156
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    All revisions of the changelog, the manifests and the filelogs selected
    by ``revlogs`` are migrated from ``srcrepo`` into ``dstrepo`` inside the
    destination transaction ``tr``.

    ``deltareuse`` is forwarded to ``revlog.clone()`` and controls whether
    existing deltas may be reused. ``forcedeltabothparents`` asks the clone
    to test deltas against both parents.
    """
    # Aggregate counters (all revlogs) plus per-kind counters for filelogs
    # (f*), manifests (m*) and the changelog (c*); used only for the
    # progress/status output below.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())
    # mapping of data files which needs to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        # '.d' files carry revlog data whose index ('.i') we already visit.
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            changelogs[unencoded] = rl
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            manifests[unencoded] = rl
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            filelogs[unencoded] = rl
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Fix: the exception was previously constructed but never
            # raised, so an unknown revlog type was silently skipped.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, oldrl in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, oldrl in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, oldrl in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            unencoded,
            deltareuse,
            forcedeltabothparents,
            revlogs,
            sidedatacompanion,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
387
387
388
388
389 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
389 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
390 """Determine whether to copy a store file during upgrade.
390 """Determine whether to copy a store file during upgrade.
391
391
392 This function is called when migrating store files from ``srcrepo`` to
392 This function is called when migrating store files from ``srcrepo`` to
393 ``dstrepo`` as part of upgrading a repository.
393 ``dstrepo`` as part of upgrading a repository.
394
394
395 Args:
395 Args:
396 srcrepo: repo we are copying from
396 srcrepo: repo we are copying from
397 dstrepo: repo we are copying to
397 dstrepo: repo we are copying to
398 requirements: set of requirements for ``dstrepo``
398 requirements: set of requirements for ``dstrepo``
399 path: store file being examined
399 path: store file being examined
400 mode: the ``ST_MODE`` file type of ``path``
400 mode: the ``ST_MODE`` file type of ``path``
401 st: ``stat`` data structure for ``path``
401 st: ``stat`` data structure for ``path``
402
402
403 Function should return ``True`` if the file is to be copied.
403 Function should return ``True`` if the file is to be copied.
404 """
404 """
405 # Skip revlogs.
405 # Skip revlogs.
406 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
406 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
407 return False
407 return False
408 # Skip transaction related files.
408 # Skip transaction related files.
409 if path.startswith(b'undo'):
409 if path.startswith(b'undo'):
410 return False
410 return False
411 # Only copy regular files.
411 # Only copy regular files.
412 if mode != stat.S_IFREG:
412 if mode != stat.S_IFREG:
413 return False
413 return False
414 # Skip other skipped files.
414 # Skip other skipped files.
415 if path in (b'lock', b'fncache'):
415 if path in (b'lock', b'fncache'):
416 return False
416 return False
417
417
418 return True
418 return True
419
419
420
420
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation intentionally does nothing; extensions may
    wrap it to migrate additional data into ``dstrepo``.
    """
427
427
428
428
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the old store and
    requires file.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            upgrade_op.delta_reuse_mode,
            upgrade_op.has_upgrade_action(b're-delta-multibase'),
            revlogs=upgrade_op.revlogs_to_process,
        )

    # Now copy other files in the store directory.
    # The sorted() makes execution deterministic.
    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        if not _filterstorefile(
            srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
        ):
            continue

        srcrepo.ui.status(_(b'copying %s\n') % p)
        src = srcrepo.store.rawvfs.join(p)
        dst = dstrepo.store.rawvfs.join(p)
        util.copyfile(src, dst, copystat=True)

    # Fix: previously the imported ``requirements`` *module* was passed
    # here; the hook expects the destination requirements set, consistent
    # with the _filterstorefile() call above.
    _finishdatamigration(ui, srcrepo, dstrepo, upgrade_op.new_requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now