##// END OF EJS Templates
upgrade: move `printrequirements()` to UpgradeOperation class...
Pulkit Goyal -
r46807:aba979b1 default
parent child Browse files
Show More
@@ -1,299 +1,273 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 hg,
14 14 localrepo,
15 15 pycompat,
16 16 )
17 17
18 18 from .upgrade_utils import (
19 19 actions as upgrade_actions,
20 20 engine as upgrade_engine,
21 21 )
22 22
# convenience alias into upgrade_utils.actions (imported above)
allformatvariant = upgrade_actions.allformatvariant
24 24
25 25 # search without '-' to support older form on newer client.
26 26 #
27 27 # We don't enforce backward compatibility for debug command so this
28 28 # might eventually be dropped. However, having to use two different
29 29 # forms in script when comparing result is annoying enough to add
30 30 # backward compatibility for a while.
# maps the legacy (dash-less) optimization names to their modern spelling
legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}
37 37
38 38
def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
    filelogs=None,
):
    """Upgrade a repository in place.

    ui - interface used for status/warning output
    repo - repository to upgrade (operated on unfiltered)
    run - when False, only describe what an upgrade would do
    optimize - iterable of optimization names; legacy dash-less forms are
        accepted and normalized via ``legacy_opts_map``
    backup - when False, the pre-upgrade repository content is deleted
    manifest/changelog/filelogs - tri-state (None/True/False) selection of
        which revlog categories to process
    """
    if optimize is None:
        optimize = []
    # normalize legacy (dash-less) optimization names
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    repo = repo.unfiltered()

    revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
    specentries = (
        (upgrade_engine.UPGRADE_CHANGELOG, changelog),
        (upgrade_engine.UPGRADE_MANIFEST, manifest),
        (upgrade_engine.UPGRADE_FILELOGS, filelogs),
    )
    # keep only the revlog categories the caller explicitly selected
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            revlogs = set()
            for upgrade, enabled in specified:
                if enabled:
                    revlogs.add(upgrade)
        else:
            # none are enabled
            for upgrade, __ in specified:
                revlogs.discard(upgrade)

    # Ensure the repository can be upgraded.
    upgrade_actions.check_source_requirements(repo)

    default_options = localrepo.defaultcreateopts(repo.ui)
    newreqs = localrepo.newreporequirements(repo.ui, default_options)
    newreqs.update(upgrade_actions.preservedrequirements(repo))

    upgrade_actions.check_requirements_changes(repo, newreqs)

    # Find and validate all improvements that can be made.
    alloptimizations = upgrade_actions.findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = upgrade_actions.finddeficiencies(repo)
    actions = upgrade_actions.determineactions(
        repo, deficiencies, repo.requirements, newreqs
    )
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    # a revlog subset selection is only honored when the requirement changes
    # do not force a full reclone anyway
    if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
        incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
            removedreqs | addedreqs
        )
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS

    upgrade_op = upgrade_actions.UpgradeOperation(
        ui,
        newreqs,
        repo.requirements,
        actions,
        revlogs,
    )

    if not run:
        # dry-run: report deficiencies and the planned changes, then return
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        upgrade_op.print_requirements()
        upgrade_op.print_optimisations()
        upgrade_op.print_upgrade_actions()
        upgrade_op.print_affected_revlogs()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    upgrade_op.print_requirements()
    upgrade_op.print_optimisations()
    upgrade_op.print_upgrade_actions()
    upgrade_op.print_affected_revlogs()

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = upgrade_engine.upgrade(
                    ui, repo, dstrepo, upgrade_op
                )
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

        if backuppath and not ui.quiet:
            ui.warn(
                _(b'copy of old repository backed up at %s\n') % backuppath
            )
            ui.warn(
                _(
                    b'the old repository will not be deleted; remove '
                    b'it to free up disk space once the upgraded '
                    b'repository is verified\n'
                )
            )

        if upgrade_actions.sharesafe.name in addedreqs:
            ui.warn(
                _(
                    b'repository upgraded to share safe mode, existing'
                    b' shares will still work in old non-safe mode. '
                    b'Re-share existing shares to use them in safe mode'
                    b' New shares will be created in safe mode.\n'
                )
            )
        if upgrade_actions.sharesafe.name in removedreqs:
            ui.warn(
                _(
                    b'repository downgraded to not use share safe mode, '
                    b'existing shares will not work and needs to'
                    b' be reshared.\n'
                )
            )
@@ -1,769 +1,810 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from ..i18n import _
11 11 from .. import (
12 12 error,
13 13 localrepo,
14 14 requirements,
15 15 util,
16 16 )
17 17
18 18 from ..utils import compression
19 19
# list of requirements that request a clone of all revlog if added/removed
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
25 25
26 26
def preservedrequirements(repo):
    """Requirements of `repo` to carry over verbatim into the upgraded repo.

    Currently empty: the upgraded repository's requirements are computed
    entirely from the new-repository defaults.
    """
    return set()
29 29
30 30
# the two possible values of ``improvement.type`` (see class below)
DEFICIENCY = b'deficiency'
OPTIMISATION = b'optimization'
33 33
34 34
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``DEFICIENCY`` types, should be
       worded in the present tense. For ``OPTIMISATION`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        if isinstance(other, improvement):
            # the identity of an improvement is fully captured by its name
            return self.name == other.name
        # not comparable by us: let Python try the other operand
        return NotImplemented

    def __ne__(self, other):
        # Python 2 does not derive ``!=`` from ``__eq__``, so spell it out
        return not (self == other)

    def __hash__(self):
        # hashing must stay consistent with the name-based __eq__
        return hash(self.name)
77 77
78 78
# registry of every formatvariant class, populated via @registerformatvariant
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator adding ``cls`` to the ``allformatvariant`` registry."""
    allformatvariant.append(cls)
    return cls
85 85
86 86
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    # a missing format variant is always reported as a deficiency
    type = DEFICIENCY
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``DEFICIENCY`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # format variants are used as classes and are never instantiated
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
121 121
122 122
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # requirements a brand-new repository would get with default options
        create_opts = localrepo.defaultcreateopts(ui)
        return localrepo.newreporequirements(ui, create_opts)

    @classmethod
    def fromrepo(cls, repo):
        # subclasses must define their controlling requirement
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
148 148
149 149
@registerformatvariant
class fncache(requirementformatvariant):
    """Tracks the 'fncache' requirement (long/reserved filename support)."""

    name = b'fncache'

    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
168 168
169 169
@registerformatvariant
class dotencode(requirementformatvariant):
    """Tracks the 'dotencode' requirement (leading dot/space filenames)."""

    name = b'dotencode'

    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
187 187
188 188
@registerformatvariant
class generaldelta(requirementformatvariant):
    """Tracks the 'generaldelta' requirement (free delta-base selection)."""

    name = b'generaldelta'

    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
214 214
215 215
@registerformatvariant
class sharesafe(requirementformatvariant):
    """Tracks the share-safe requirement (shares inherit reqs/config)."""

    name = b'exp-sharesafe'
    _requirement = requirements.SHARESAFE_REQUIREMENT

    default = False

    description = _(
        b'old shared repositories do not share source repository '
        b'requirements and config. This leads to various problems '
        b'when the source repository format is upgraded or some new '
        b'extensions are enabled.'
    )

    upgrademessage = _(
        b'Upgrades a repository to share-safe format so that future '
        b'shares of this repository share its requirements and configs.'
    )
234 234
235 235
@registerformatvariant
class sparserevlog(requirementformatvariant):
    """Tracks the sparse-revlog requirement (gappy delta chains)."""

    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
260 260
261 261
@registerformatvariant
class sidedata(requirementformatvariant):
    """Tracks the sidedata requirement (extra per-revision storage)."""

    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
276 276
277 277
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    """Tracks the persistent nodemap requirement (on-disk node->rev map)."""

    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
291 291
292 292
@registerformatvariant
class copiessdc(requirementformatvariant):
    """Tracks the copies-side-data-changeset requirement."""

    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
306 306
307 307
@registerformatvariant
class removecldeltachain(formatvariant):
    """Detects changelogs still stored as delta chains (pre-4.0 layout)."""

    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        for rev in cl:
            # a revision that is not its own chain base is a delta
            if cl.chainbase(rev) != rev:
                return False
        return True

    @staticmethod
    def fromconfig(repo):
        # new repositories never store the changelog as delta chains
        return True
338 338
339 339
@registerformatvariant
class compressionengine(formatvariant):
    """Reports which compression engine the repository/config selects."""

    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # Multiple compression-engine requirements may co-exist because,
        # strictly speaking, revlogs appear to support mixed compression
        # styles; new entries use "the last one" encountered here.
        current = b'zlib'
        for entry in repo.requirements:
            if entry.startswith(b'revlog-compression-') or entry.startswith(
                b'exp-compression-'
            ):
                current = entry.split(b'-', 2)[2]
        return current

    @classmethod
    def fromconfig(cls, repo):
        configured = repo.ui.configlist(b'format', b'revlog-compression')
        # return the first valid value as the selection code would do
        for candidate in configured:
            if candidate in util.compengines:
                return candidate

        # no valid compression found: show everything for clarity
        return b','.join(configured)
377 377
378 378
@registerformatvariant
class compressionlevel(formatvariant):
    """Reports the compression level used/configured for revlog storage."""

    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @staticmethod
    def _level_for(repo, comp):
        """Resolve the configured level for engine ``comp``.

        Returns ``b'default'`` when no explicit level is configured or the
        engine has no level setting. Factored out because ``fromrepo`` and
        ``fromconfig`` previously duplicated this logic verbatim.
        """
        level = None
        if comp == b'zlib':
            level = repo.ui.configint(b'storage', b'revlog.zlib.level')
        elif comp == b'zstd':
            level = repo.ui.configint(b'storage', b'revlog.zstd.level')
        if level is None:
            return b'default'
        # NOTE(review): bytes(level) on an int yields NUL bytes under
        # Python 3 — confirm whether a pycompat helper is intended here.
        return bytes(level)

    @classmethod
    def fromrepo(cls, repo):
        """compression level currently used by the repository's engine"""
        return cls._level_for(repo, compressionengine.fromrepo(repo))

    @classmethod
    def fromconfig(cls, repo):
        """compression level the current configuration would select"""
        return cls._level_for(repo, compressionengine.fromconfig(repo))
411 411
412 412
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    # every registered format variant absent from the repo is a deficiency
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
426 426
427 427
# registry of every known optimisation `improvement` instance
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    """Add ``obj`` to the ``ALL_OPTIMISATIONS`` registry and return it."""
    ALL_OPTIMISATIONS.append(obj)
    return obj
434 434
435 435
# The optimisations below are registered in increasing order of cost:
# re-delta-parent < re-delta-multibase < re-delta-all < re-delta-fulladd.
register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revision and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the amount '
            b'of files in the repository; this slow down should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it was new '
            b'content. It will go through the full storage '
            b'mechanism giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    )
)
517 517
518 518
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # Every registered optimisation is returned unconditionally; logic
    # elsewhere decides which ones actually apply.
    return ALL_OPTIMISATIONS[:]
524 524
525 525
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    # A deficiency backed by a requirement is actionable only when that
    # requirement appears in the destination; requirement-less deficiencies
    # always pass through.
    #
    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.
    return [
        d
        for d in deficiencies
        if d._requirement is None or d._requirement in destreqs
    ]
555 555
556 556
class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(
        self,
        ui,
        new_requirements,
        current_requirements,
        actions,
        revlogs_to_process,
    ):
        self.ui = ui
        self.new_requirements = new_requirements
        self.current_requirements = current_requirements
        self.actions = actions
        # action names, cached for fast membership checks in has_action()
        self._actions_names = {action.name for action in actions}
        self.revlogs_to_process = revlogs_to_process
        # requirements which will be added by the operation
        self._added_requirements = new_requirements - current_requirements
        # requirements which will be removed by the operation
        self._removed_requirements = current_requirements - new_requirements
        # requirements which will be preserved by the operation
        self._preserved_requirements = current_requirements & new_requirements

    def _write_labeled(self, items, label):
        """
        Utility function to aid writing of a list under one label
        """
        first = True
        for item in sorted(items):
            if not first:
                self.ui.write(b', ')
            self.ui.write(item, label=label)
            first = False

    def print_requirements(self):
        """write the preserved/removed/added requirement changes"""
        self.ui.write(_(b'requirements\n'))
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, "upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        # removed/added sections only appear when non-empty
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, "upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, "upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')

    def print_optimisations(self):
        """write the optimisation actions this operation will perform"""
        optimisations = [a for a in self.actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            self.ui.write(_(b'optimisations: '))
            self._write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            self.ui.write(b'\n\n')

    def print_upgrade_actions(self):
        """write each planned action's name and human-readable message"""
        for a in self.actions:
            self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    def print_affected_revlogs(self):
        """write the set of revlogs that will be processed"""
        if not self.revlogs_to_process:
            self.ui.write((b'no revlogs to process\n'))
        else:
            self.ui.write((b'processed revlogs:\n'))
            for r in sorted(self.revlogs_to_process):
                self.ui.write((b' - %s\n' % r))
            self.ui.write((b'\n'))

    def has_action(self, name):
        """ Check whether the upgrade operation will perform this action """
        return name in self._actions_names
605 646
606 647
607 648 ### Code checking if a repository can got through the upgrade process at all. #
608 649
609 650
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2.
    return {b'revlogv1', b'store'}
622 663
623 664
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    return {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
640 681
641 682
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade"""
    # A missing baseline requirement means the repo is too old/odd to touch.
    missing = requiredsourcerequirements(repo) - repo.requirements
    if missing:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missing)))

    # Any explicitly-blocked requirement also aborts the upgrade.
    blocking = blocksourcerequirements(repo) & repo.requirements
    if blocking:
        msg = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(msg % b', '.join(sorted(blocking)))
658 699
659 700
660 701 ### Verify the validity of the planned requirement changes ####################
661 702
662 703
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression requirements can be dropped for any engine that is both
    # available and usable in revlogs.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
684 725
685 726
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Also accept the requirement advertised by each usable compression
    # engine (legacy "exp-compression-*" name, plus the dedicated zstd one).
    for name in compression.compengines:
        engine = compression.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')
    return supported
713 754
714 755
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # Compression-related requirements may be added for engines that can
    # actually serve revlog data on this installation.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not engine.available():
            continue
        if not engine.revlogheader():
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
742 783
743 784
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement transition planned for an upgrade.

    Raises ``error.Abort`` when moving from ``repo.requirements`` to
    ``new_reqs`` would remove a requirement that may not be removed, add
    one that may not be added, or produce a destination requirement the
    upgrade code does not support.
    """
    old_reqs = repo.requirements

    support_removal = supportremovedrequirements(repo)
    no_remove_reqs = old_reqs - new_reqs - support_removal
    if no_remove_reqs:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        no_remove_reqs = b', '.join(sorted(no_remove_reqs))
        raise error.Abort(msg % no_remove_reqs)

    support_addition = allowednewrequirements(repo)
    no_add_reqs = new_reqs - old_reqs - support_addition
    if no_add_reqs:
        # Use %s formatting like the sibling messages; previously this was
        # built by concatenating onto a translated prefix, which produced
        # the same output bytes but an inconsistent (and translator-hostile)
        # message id. NOTE: this changes the i18n msgid for this message.
        msg = _(b'cannot upgrade repository; do not support adding requirement: %s')
        no_add_reqs = b', '.join(sorted(no_add_reqs))
        raise error.Abort(msg % no_add_reqs)

    supported = supporteddestrequirements(repo)
    unsupported_reqs = new_reqs - supported
    if unsupported_reqs:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        unsupported_reqs = b', '.join(sorted(unsupported_reqs))
        raise error.Abort(msg % unsupported_reqs)
@@ -1,500 +1,500 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27
28 28
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        tree = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=tree)
    # reverse of "/".join(("data", path + ".i")): drop the "data/" prefix
    # and the ".i" suffix to recover the tracked file name.
    return filelog.filelog(repo.svfs, path[5:-2])
42 42
43 43
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap down to the underlying revlog when the object carries a
    # `_revlog` attribute (wrapper classes expose the raw revlog there).
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    # Opening the index for write as a side effect creates any missing
    # parent directories in the destination store.
    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    # A separate ".d" file only exists when the revlog data is not inlined
    # in the index, so copy it only if the source actually has one.
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        # Filelogs must be registered in the destination fncache; the
        # changelog and manifest are not tracked there.
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 77
78 78
# Symbolic names for the revlog categories an upgrade can be limited to.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

# Default selection: process every category of revlog.
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)
86 86
87 87
def getsidedatacompanion(srcrepo, dstrepo):
    """Build a side-data companion callback for the revlog clone, if needed.

    Returns ``None`` when the requirement delta between ``srcrepo`` and
    ``dstrepo`` implies no side-data change; otherwise a callable handed to
    the clone step that adds, rewrites or strips per-revision side data.
    """
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def sidedatacompanion(rl, rev):
            # Strip all side data: request a rewrite (True) with an empty
            # replacement for revisions carrying the side-data flag.
            # NOTE(review): tuple shape follows revlog.clone's
            # sidedatacompanion contract — confirm against revlog.clone.
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        # Destination stores copy metadata in side data: add it.
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        # Destination no longer stores copy metadata in side data: drop it.
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
105 105
106 106
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
119 119
120 120
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos.

    ``deltareuse`` and ``forcedeltabothparents`` are passed through to
    ``revlog.clone()``. ``revlogs`` restricts which revlog categories
    (changelog, manifest, filelogs) are actually cloned; entries outside
    the filter are blindly copied instead.
    """
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            # Bug fix: the exception was previously constructed but never
            # raised, silently skipping unknown revlog types.
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

        # The store walk yields filelogs, then manifests, then the
        # changelog; each branch below reports completion of the previous
        # phase and opens the next progress bar exactly once.
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )
344 344
345 345
346 346 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
347 347 """Determine whether to copy a store file during upgrade.
348 348
349 349 This function is called when migrating store files from ``srcrepo`` to
350 350 ``dstrepo`` as part of upgrading a repository.
351 351
352 352 Args:
353 353 srcrepo: repo we are copying from
354 354 dstrepo: repo we are copying to
355 355 requirements: set of requirements for ``dstrepo``
356 356 path: store file being examined
357 357 mode: the ``ST_MODE`` file type of ``path``
358 358 st: ``stat`` data structure for ``path``
359 359
360 360 Function should return ``True`` if the file is to be copied.
361 361 """
362 362 # Skip revlogs.
363 363 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
364 364 return False
365 365 # Skip transaction related files.
366 366 if path.startswith(b'undo'):
367 367 return False
368 368 # Only copy regular files.
369 369 if mode != stat.S_IFREG:
370 370 return False
371 371 # Skip other skipped files.
372 372 if path in (b'lock', b'fncache'):
373 373 return False
374 374
375 375 return True
376 376
377 377
378 378 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
379 379 """Hook point for extensions to perform additional actions during upgrade.
380 380
381 381 This function is called after revlogs and store files have been copied but
382 382 before the new store is swapped into the original location.
383 383 """
384 384
385 385
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the old store.
    """
    # Both repositories must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    # Translate the requested optimisation actions into the delta-reuse
    # policy used by the revlog clone.
    if upgrade_op.has_action(b're-delta-all'):
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif upgrade_op.has_action(b're-delta-parent'):
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif upgrade_op.has_action(b're-delta-multibase'):
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif upgrade_op.has_action(b're-delta-fulladd'):
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            upgrade_op.has_action(b're-delta-multibase'),
            revlogs=upgrade_op.revlogs_to_process,
        )

    # Now copy other files in the store directory.
    # The sorted() makes execution deterministic.
    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        if not _filterstorefile(
            srcrepo, dstrepo, upgrade_op.new_requirements, p, kind, st
        ):
            continue

        srcrepo.ui.status(_(b'copying %s\n') % p)
        src = srcrepo.store.rawvfs.join(p)
        dst = dstrepo.store.rawvfs.join(p)
        util.copyfile(src, dst, copystat=True)

    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
General Comments 0
You need to be logged in to leave comments. Login now