upgrade: start moving the "to be happening" data in a dedicated object...
marmoute
r46671:74923cb8 default draft
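This changeset starts bundling the "what is about to happen" state of an upgrade (the target requirements, the chosen action names, and the set of revlogs to process) into a single container, UpgradeOperation, which upgrade.upgraderepo builds once and hands to upgrade_utils.engine.upgrade in place of several loose arguments. A minimal sketch of the new shape, distilled from the diff below (simplified, not the full implementation):

# Sketch only: mirrors the UpgradeOperation class added in upgrade_utils/actions.py below.
class UpgradeOperation(object):
    """represent the work to be done during an upgrade"""

    def __init__(self, requirements, actions, revlogs_to_process):
        self.requirements = requirements              # requirements of the upgraded repo
        self.actions = actions                        # list of action names (bytes)
        self.revlogs_to_process = revlogs_to_process  # e.g. UPGRADE_ALL_REVLOGS

# Caller side (upgrade.py): build the object once...
#     upgrade_op = upgrade_actions.UpgradeOperation(
#         newreqs, [a.name for a in actions], revlogs
#     )
# ...then pass it instead of three separate parameters:
#     backuppath = upgrade_engine.upgrade(ui, repo, dstrepo, upgrade_op)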
@@ -1,320 +1,324 @@ mercurial/upgrade.py
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 hg,
14 14 localrepo,
15 15 pycompat,
16 16 )
17 17
18 18 from .upgrade_utils import (
19 19 actions as upgrade_actions,
20 20 engine as upgrade_engine,
21 21 )
22 22
23 23 allformatvariant = upgrade_actions.allformatvariant
24 24
25 25 # search without '-' to support older forms on newer clients.
26 26 #
27 27 # We don't enforce backward compatibility for debug commands, so this
28 28 # might eventually be dropped. However, having to use two different
29 29 # forms in scripts when comparing results is annoying enough to add
30 30 # backward compatibility for a while.
31 31 legacy_opts_map = {
32 32 b'redeltaparent': b're-delta-parent',
33 33 b'redeltamultibase': b're-delta-multibase',
34 34 b'redeltaall': b're-delta-all',
35 35 b'redeltafulladd': b're-delta-fulladd',
36 36 }
37 37
38 38
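As noted in the comment above the map, the older dash-less spellings are still accepted for a while; they are normalized with the same expression upgraderepo uses below. A tiny illustration (values are examples only):

# Illustration: normalizing legacy optimization names before validation.
requested = {b'redeltaparent', b're-delta-all'}
normalized = {legacy_opts_map.get(o, o) for o in requested}
# normalized == {b're-delta-parent', b're-delta-all'}
# Names that match no known optimization are kept as-is and rejected
# later by upgraderepo with a hint to run without arguments.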
39 39 def upgraderepo(
40 40 ui,
41 41 repo,
42 42 run=False,
43 43 optimize=None,
44 44 backup=True,
45 45 manifest=None,
46 46 changelog=None,
47 47 filelogs=None,
48 48 ):
49 49 """Upgrade a repository in place."""
50 50 if optimize is None:
51 51 optimize = []
52 52 optimize = {legacy_opts_map.get(o, o) for o in optimize}
53 53 repo = repo.unfiltered()
54 54
55 55 revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
56 56 specentries = (
57 57 (upgrade_engine.UPGRADE_CHANGELOG, changelog),
58 58 (upgrade_engine.UPGRADE_MANIFEST, manifest),
59 59 (upgrade_engine.UPGRADE_FILELOGS, filelogs),
60 60 )
61 61 specified = [(y, x) for (y, x) in specentries if x is not None]
62 62 if specified:
63 63 # we have some limitations on which revlogs can be recloned
64 64 if any(x for y, x in specified):
65 65 revlogs = set()
66 66 for upgrade, enabled in specified:
67 67 if enabled:
68 68 revlogs.add(upgrade)
69 69 else:
70 70 # none are enabled
71 71 for upgrade, __ in specified:
72 72 revlogs.discard(upgrade)
73 73
74 74 # Ensure the repository can be upgraded.
75 75 upgrade_actions.check_source_requirements(repo)
76 76
77 77 default_options = localrepo.defaultcreateopts(repo.ui)
78 78 newreqs = localrepo.newreporequirements(repo.ui, default_options)
79 79 newreqs.update(upgrade_actions.preservedrequirements(repo))
80 80
81 81 upgrade_actions.check_requirements_changes(repo, newreqs)
82 82
83 83 # Find and validate all improvements that can be made.
84 84 alloptimizations = upgrade_actions.findoptimizations(repo)
85 85
86 86 # Apply and Validate arguments.
87 87 optimizations = []
88 88 for o in alloptimizations:
89 89 if o.name in optimize:
90 90 optimizations.append(o)
91 91 optimize.discard(o.name)
92 92
93 93 if optimize: # anything left is unknown
94 94 raise error.Abort(
95 95 _(b'unknown optimization action requested: %s')
96 96 % b', '.join(sorted(optimize)),
97 97 hint=_(b'run without arguments to see valid optimizations'),
98 98 )
99 99
100 100 deficiencies = upgrade_actions.finddeficiencies(repo)
101 101 actions = upgrade_actions.determineactions(
102 102 repo, deficiencies, repo.requirements, newreqs
103 103 )
104 104 actions.extend(
105 105 o
106 106 for o in sorted(optimizations)
107 107 # determineactions could have added optimisations
108 108 if o not in actions
109 109 )
110 110
111 111 removedreqs = repo.requirements - newreqs
112 112 addedreqs = newreqs - repo.requirements
113 113
114 114 if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
115 115 incompatible = upgrade_actions.RECLONES_REQUIREMENTS & (
116 116 removedreqs | addedreqs
117 117 )
118 118 if incompatible:
119 119 msg = _(
120 120 b'ignoring revlogs selection flags, format requirements '
121 121 b'change: %s\n'
122 122 )
123 123 ui.warn(msg % b', '.join(sorted(incompatible)))
124 124 revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
125 125
126 126 def write_labeled(l, label):
127 127 first = True
128 128 for r in sorted(l):
129 129 if not first:
130 130 ui.write(b', ')
131 131 ui.write(r, label=label)
132 132 first = False
133 133
134 134 def printrequirements():
135 135 ui.write(_(b'requirements\n'))
136 136 ui.write(_(b' preserved: '))
137 137 write_labeled(
138 138 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
139 139 )
140 140 ui.write((b'\n'))
141 141 removed = repo.requirements - newreqs
142 142 if repo.requirements - newreqs:
143 143 ui.write(_(b' removed: '))
144 144 write_labeled(removed, "upgrade-repo.requirement.removed")
145 145 ui.write((b'\n'))
146 146 added = newreqs - repo.requirements
147 147 if added:
148 148 ui.write(_(b' added: '))
149 149 write_labeled(added, "upgrade-repo.requirement.added")
150 150 ui.write((b'\n'))
151 151 ui.write(b'\n')
152 152
153 153 def printoptimisations():
154 154 optimisations = [
155 155 a for a in actions if a.type == upgrade_actions.OPTIMISATION
156 156 ]
157 157 optimisations.sort(key=lambda a: a.name)
158 158 if optimisations:
159 159 ui.write(_(b'optimisations: '))
160 160 write_labeled(
161 161 [a.name for a in optimisations],
162 162 "upgrade-repo.optimisation.performed",
163 163 )
164 164 ui.write(b'\n\n')
165 165
166 166 def printupgradeactions():
167 167 for a in actions:
168 168 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
169 169
170 170 def print_affected_revlogs():
171 171 if not revlogs:
172 172 ui.write((b'no revlogs to process\n'))
173 173 else:
174 174 ui.write((b'processed revlogs:\n'))
175 175 for r in sorted(revlogs):
176 176 ui.write((b' - %s\n' % r))
177 177 ui.write((b'\n'))
178 178
179 upgrade_op = upgrade_actions.UpgradeOperation(
180 newreqs,
181 [a.name for a in actions],
182 revlogs,
183 )
184
179 185 if not run:
180 186 fromconfig = []
181 187 onlydefault = []
182 188
183 189 for d in deficiencies:
184 190 if d.fromconfig(repo):
185 191 fromconfig.append(d)
186 192 elif d.default:
187 193 onlydefault.append(d)
188 194
189 195 if fromconfig or onlydefault:
190 196
191 197 if fromconfig:
192 198 ui.status(
193 199 _(
194 200 b'repository lacks features recommended by '
195 201 b'current config options:\n\n'
196 202 )
197 203 )
198 204 for i in fromconfig:
199 205 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
200 206
201 207 if onlydefault:
202 208 ui.status(
203 209 _(
204 210 b'repository lacks features used by the default '
205 211 b'config options:\n\n'
206 212 )
207 213 )
208 214 for i in onlydefault:
209 215 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
210 216
211 217 ui.status(b'\n')
212 218 else:
213 219 ui.status(
214 220 _(
215 221 b'(no feature deficiencies found in existing '
216 222 b'repository)\n'
217 223 )
218 224 )
219 225
220 226 ui.status(
221 227 _(
222 228 b'performing an upgrade with "--run" will make the following '
223 229 b'changes:\n\n'
224 230 )
225 231 )
226 232
227 233 printrequirements()
228 234 printoptimisations()
229 235 printupgradeactions()
230 236 print_affected_revlogs()
231 237
232 238 unusedoptimize = [i for i in alloptimizations if i not in actions]
233 239
234 240 if unusedoptimize:
235 241 ui.status(
236 242 _(
237 243 b'additional optimizations are available by specifying '
238 244 b'"--optimize <name>":\n\n'
239 245 )
240 246 )
241 247 for i in unusedoptimize:
242 248 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
243 249 return
244 250
245 251 # Else we're in the run=true case.
246 252 ui.write(_(b'upgrade will perform the following actions:\n\n'))
247 253 printrequirements()
248 254 printoptimisations()
249 255 printupgradeactions()
250 256 print_affected_revlogs()
251 257
252 upgradeactions = [a.name for a in actions]
253
254 258 ui.status(_(b'beginning upgrade...\n'))
255 259 with repo.wlock(), repo.lock():
256 260 ui.status(_(b'repository locked and read-only\n'))
257 261 # Our strategy for upgrading the repository is to create a new,
258 262 # temporary repository, write data to it, then do a swap of the
259 263 # data. There are less heavyweight ways to do this, but it is easier
260 264 # to create a new repo object than to instantiate all the components
261 265 # (like the store) separately.
262 266 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
263 267 backuppath = None
264 268 try:
265 269 ui.status(
266 270 _(
267 271 b'creating temporary repository to stage migrated '
268 272 b'data: %s\n'
269 273 )
270 274 % tmppath
271 275 )
272 276
273 277 # clone ui without using ui.copy because repo.ui is protected
274 278 repoui = repo.ui.__class__(repo.ui)
275 279 dstrepo = hg.repository(repoui, path=tmppath, create=True)
276 280
277 281 with dstrepo.wlock(), dstrepo.lock():
278 282 backuppath = upgrade_engine.upgrade(
279 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
283 ui, repo, dstrepo, upgrade_op
280 284 )
281 285 if not (backup or backuppath is None):
282 286 ui.status(
283 287 _(b'removing old repository content%s\n') % backuppath
284 288 )
285 289 repo.vfs.rmtree(backuppath, forcibly=True)
286 290 backuppath = None
287 291
288 292 finally:
289 293 ui.status(_(b'removing temporary repository %s\n') % tmppath)
290 294 repo.vfs.rmtree(tmppath, forcibly=True)
291 295
292 296 if backuppath and not ui.quiet:
293 297 ui.warn(
294 298 _(b'copy of old repository backed up at %s\n') % backuppath
295 299 )
296 300 ui.warn(
297 301 _(
298 302 b'the old repository will not be deleted; remove '
299 303 b'it to free up disk space once the upgraded '
300 304 b'repository is verified\n'
301 305 )
302 306 )
303 307
304 308 if upgrade_actions.sharesafe.name in addedreqs:
305 309 ui.warn(
306 310 _(
307 311 b'repository upgraded to share safe mode, existing'
308 312 b' shares will still work in old non-safe mode. '
309 313 b'Re-share existing shares to use them in safe mode.'
310 314 b' New shares will be created in safe mode.\n'
311 315 )
312 316 )
313 317 if upgrade_actions.sharesafe.name in removedreqs:
314 318 ui.warn(
315 319 _(
316 320 b'repository downgraded to not use share safe mode, '
317 321 b'existing shares will not work and need to'
318 322 b' be reshared.\n'
319 323 )
320 324 )
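One detail of upgraderepo above that is easy to misread is how the changelog/manifest/filelogs keyword arguments are turned into the set of revlogs to process: if at least one of the given flags is true, the selection starts empty and only the enabled revlogs are added; if every given flag is false, the selection starts from all revlogs and the named ones are removed. A standalone sketch of that rule, using the same constants as the engine module (the helper name select_revlogs is hypothetical):

# Sketch only: the revlog selection rule from upgraderepo, extracted for clarity.
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)

def select_revlogs(changelog=None, manifest=None, filelogs=None):
    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (
        (UPGRADE_CHANGELOG, changelog),
        (UPGRADE_MANIFEST, manifest),
        (UPGRADE_FILELOGS, filelogs),
    )
    specified = [(upgrade, v) for (upgrade, v) in specentries if v is not None]
    if specified:
        if any(v for _upgrade, v in specified):
            # at least one revlog explicitly enabled: keep only those
            revlogs = {upgrade for upgrade, v in specified if v}
        else:
            # only "disabled" entries were given: drop them from the full set
            for upgrade, _v in specified:
                revlogs.discard(upgrade)
    return revlogs

# select_revlogs(changelog=True)   -> {b'changelog'}
# select_revlogs(manifest=False)   -> {b'changelog', b'all-filelogs'}
# select_revlogs()                 -> all three revlog groups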
@@ -1,719 +1,728 @@ mercurial/upgrade_utils/actions.py
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from ..i18n import _
11 11 from .. import (
12 12 error,
13 13 localrepo,
14 14 requirements,
15 15 util,
16 16 )
17 17
18 18 from ..utils import compression
19 19
20 20 # list of requirements that request a clone of all revlog if added/removed
21 21 RECLONES_REQUIREMENTS = {
22 22 b'generaldelta',
23 23 requirements.SPARSEREVLOG_REQUIREMENT,
24 24 }
25 25
26 26
27 27 def preservedrequirements(repo):
28 28 return set()
29 29
30 30
31 31 DEFICIENCY = b'deficiency'
32 32 OPTIMISATION = b'optimization'
33 33
34 34
35 35 class improvement(object):
36 36 """Represents an improvement that can be made as part of an upgrade.
37 37
38 38 The following attributes are defined on each instance:
39 39
40 40 name
41 41 Machine-readable string uniquely identifying this improvement. It
42 42 will be mapped to an action later in the upgrade process.
43 43
44 44 type
45 45 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
46 46 problem. An optimization is an action (sometimes optional) that
47 47 can be taken to further improve the state of the repository.
48 48
49 49 description
50 50 Message intended for humans explaining the improvement in more detail,
51 51 including the implications of it. For ``DEFICIENCY`` types, should be
52 52 worded in the present tense. For ``OPTIMISATION`` types, should be
53 53 worded in the future tense.
54 54
55 55 upgrademessage
56 56 Message intended for humans explaining what an upgrade addressing this
57 57 issue will do. Should be worded in the future tense.
58 58 """
59 59
60 60 def __init__(self, name, type, description, upgrademessage):
61 61 self.name = name
62 62 self.type = type
63 63 self.description = description
64 64 self.upgrademessage = upgrademessage
65 65
66 66 def __eq__(self, other):
67 67 if not isinstance(other, improvement):
68 68 # This is what Python tells us to do
69 69 return NotImplemented
70 70 return self.name == other.name
71 71
72 72 def __ne__(self, other):
73 73 return not (self == other)
74 74
75 75 def __hash__(self):
76 76 return hash(self.name)
77 77
78 78
79 79 allformatvariant = []
80 80
81 81
82 82 def registerformatvariant(cls):
83 83 allformatvariant.append(cls)
84 84 return cls
85 85
86 86
87 87 class formatvariant(improvement):
88 88 """an improvement subclass dedicated to repository format"""
89 89
90 90 type = DEFICIENCY
91 91 ### The following attributes should be defined for each class:
92 92
93 93 # machine-readable string uniquely identifying this improvement. it will be
94 94 # mapped to an action later in the upgrade process.
95 95 name = None
96 96
97 97 # message intended for humans explaining the improvement in more detail,
98 98 # including the implications of it. For ``DEFICIENCY`` types, should be worded
99 99 # in the present tense.
100 100 description = None
101 101
102 102 # message intended for humans explaining what an upgrade addressing this
103 103 # issue will do. should be worded in the future tense.
104 104 upgrademessage = None
105 105
106 106 # value of current Mercurial default for new repository
107 107 default = None
108 108
109 109 def __init__(self):
110 110 raise NotImplementedError()
111 111
112 112 @staticmethod
113 113 def fromrepo(repo):
114 114 """current value of the variant in the repository"""
115 115 raise NotImplementedError()
116 116
117 117 @staticmethod
118 118 def fromconfig(repo):
119 119 """current value of the variant in the configuration"""
120 120 raise NotImplementedError()
121 121
122 122
123 123 class requirementformatvariant(formatvariant):
124 124 """formatvariant based on a 'requirement' name.
125 125
126 126 Many format variants are controlled by a 'requirement'. We define a small
127 127 subclass to factor out the common code.
128 128 """
129 129
130 130 # the requirement that controls this format variant
131 131 _requirement = None
132 132
133 133 @staticmethod
134 134 def _newreporequirements(ui):
135 135 return localrepo.newreporequirements(
136 136 ui, localrepo.defaultcreateopts(ui)
137 137 )
138 138
139 139 @classmethod
140 140 def fromrepo(cls, repo):
141 141 assert cls._requirement is not None
142 142 return cls._requirement in repo.requirements
143 143
144 144 @classmethod
145 145 def fromconfig(cls, repo):
146 146 assert cls._requirement is not None
147 147 return cls._requirement in cls._newreporequirements(repo.ui)
148 148
149 149
150 150 @registerformatvariant
151 151 class fncache(requirementformatvariant):
152 152 name = b'fncache'
153 153
154 154 _requirement = b'fncache'
155 155
156 156 default = True
157 157
158 158 description = _(
159 159 b'long and reserved filenames may not work correctly; '
160 160 b'repository performance is sub-optimal'
161 161 )
162 162
163 163 upgrademessage = _(
164 164 b'repository will be more resilient to storing '
165 165 b'certain paths and performance of certain '
166 166 b'operations should be improved'
167 167 )
168 168
169 169
170 170 @registerformatvariant
171 171 class dotencode(requirementformatvariant):
172 172 name = b'dotencode'
173 173
174 174 _requirement = b'dotencode'
175 175
176 176 default = True
177 177
178 178 description = _(
179 179 b'storage of filenames beginning with a period or '
180 180 b'space may not work correctly'
181 181 )
182 182
183 183 upgrademessage = _(
184 184 b'repository will be better able to store files '
185 185 b'beginning with a space or period'
186 186 )
187 187
188 188
189 189 @registerformatvariant
190 190 class generaldelta(requirementformatvariant):
191 191 name = b'generaldelta'
192 192
193 193 _requirement = b'generaldelta'
194 194
195 195 default = True
196 196
197 197 description = _(
198 198 b'deltas within internal storage are unable to '
199 199 b'choose optimal revisions; repository is larger and '
200 200 b'slower than it could be; interaction with other '
201 201 b'repositories may require extra network and CPU '
202 202 b'resources, making "hg push" and "hg pull" slower'
203 203 )
204 204
205 205 upgrademessage = _(
206 206 b'repository storage will be able to create '
207 207 b'optimal deltas; new repository data will be '
208 208 b'smaller and read times should decrease; '
209 209 b'interacting with other repositories using this '
210 210 b'storage model should require less network and '
211 211 b'CPU resources, making "hg push" and "hg pull" '
212 212 b'faster'
213 213 )
214 214
215 215
216 216 @registerformatvariant
217 217 class sharesafe(requirementformatvariant):
218 218 name = b'exp-sharesafe'
219 219 _requirement = requirements.SHARESAFE_REQUIREMENT
220 220
221 221 default = False
222 222
223 223 description = _(
224 224 b'old shared repositories do not share source repository '
225 225 b'requirements and config. This leads to various problems '
226 226 b'when the source repository format is upgraded or some new '
227 227 b'extensions are enabled.'
228 228 )
229 229
230 230 upgrademessage = _(
231 231 b'Upgrades a repository to share-safe format so that future '
232 232 b'shares of this repository share its requirements and configs.'
233 233 )
234 234
235 235
236 236 @registerformatvariant
237 237 class sparserevlog(requirementformatvariant):
238 238 name = b'sparserevlog'
239 239
240 240 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
241 241
242 242 default = True
243 243
244 244 description = _(
245 245 b'in order to limit disk reading and memory usage on older '
246 246 b'versions, the span of a delta chain from its root to its '
247 247 b'end is limited, regardless of the relevant data in this span. '
248 248 b'This can severely limit the ability of Mercurial to build good '
249 249 b'chains of deltas, resulting in much more storage space being '
250 250 b'taken and limiting the reusability of on-disk deltas during '
251 251 b'exchange.'
252 252 )
253 253
254 254 upgrademessage = _(
255 255 b'Revlogs will support delta chains with more unused data '
256 256 b'between payloads. These gaps will be skipped at read '
257 257 b'time. This allows for better delta chains, better '
258 258 b'compression, and faster exchanges with servers.'
259 259 )
260 260
261 261
262 262 @registerformatvariant
263 263 class sidedata(requirementformatvariant):
264 264 name = b'sidedata'
265 265
266 266 _requirement = requirements.SIDEDATA_REQUIREMENT
267 267
268 268 default = False
269 269
270 270 description = _(
271 271 b'Allows storage of extra data alongside a revision, '
272 272 b'unlocking various caching options.'
273 273 )
274 274
275 275 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
276 276
277 277
278 278 @registerformatvariant
279 279 class persistentnodemap(requirementformatvariant):
280 280 name = b'persistent-nodemap'
281 281
282 282 _requirement = requirements.NODEMAP_REQUIREMENT
283 283
284 284 default = False
285 285
286 286 description = _(
287 287 b'persist the node -> rev mapping on disk to speedup lookup'
288 288 )
289 289
290 290 upgrademessage = _(b'Speedup revision lookup by node id.')
291 291
292 292
293 293 @registerformatvariant
294 294 class copiessdc(requirementformatvariant):
295 295 name = b'copies-sdc'
296 296
297 297 _requirement = requirements.COPIESSDC_REQUIREMENT
298 298
299 299 default = False
300 300
301 301 description = _(b'Stores copies information alongside changesets.')
302 302
303 303 upgrademessage = _(
304 304 b'Allows the use of a more efficient algorithm to deal with ' b'copy tracing.'
305 305 )
306 306
307 307
308 308 @registerformatvariant
309 309 class removecldeltachain(formatvariant):
310 310 name = b'plain-cl-delta'
311 311
312 312 default = True
313 313
314 314 description = _(
315 315 b'changelog storage is using deltas instead of '
316 316 b'raw entries; changelog reading and any '
317 317 b'operation relying on changelog data are slower '
318 318 b'than they could be'
319 319 )
320 320
321 321 upgrademessage = _(
322 322 b'changelog storage will be reformatted to '
323 323 b'store raw entries; changelog reading will be '
324 324 b'faster; changelog size may be reduced'
325 325 )
326 326
327 327 @staticmethod
328 328 def fromrepo(repo):
329 329 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
330 330 # changelogs with deltas.
331 331 cl = repo.changelog
332 332 chainbase = cl.chainbase
333 333 return all(rev == chainbase(rev) for rev in cl)
334 334
335 335 @staticmethod
336 336 def fromconfig(repo):
337 337 return True
338 338
339 339
340 340 @registerformatvariant
341 341 class compressionengine(formatvariant):
342 342 name = b'compression'
343 343 default = b'zlib'
344 344
345 345 description = _(
346 346 b'Compression algorithm used to compress data. '
347 347 b'Some engines are faster than others'
348 348 )
349 349
350 350 upgrademessage = _(
351 351 b'revlog content will be recompressed with the new algorithm.'
352 352 )
353 353
354 354 @classmethod
355 355 def fromrepo(cls, repo):
356 356 # we allow multiple compression engine requirements to co-exist because
357 357 # strictly speaking, revlogs seem to support mixed compression styles.
358 358 #
359 359 # The compression used for new entries will be "the last one"
360 360 compression = b'zlib'
361 361 for req in repo.requirements:
362 362 prefix = req.startswith
363 363 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
364 364 compression = req.split(b'-', 2)[2]
365 365 return compression
366 366
367 367 @classmethod
368 368 def fromconfig(cls, repo):
369 369 compengines = repo.ui.configlist(b'format', b'revlog-compression')
370 370 # return the first valid value as the selection code would do
371 371 for comp in compengines:
372 372 if comp in util.compengines:
373 373 return comp
374 374
375 375 # no valid compression found; let's display them all for clarity
376 376 return b','.join(compengines)
377 377
378 378
379 379 @registerformatvariant
380 380 class compressionlevel(formatvariant):
381 381 name = b'compression-level'
382 382 default = b'default'
383 383
384 384 description = _(b'compression level')
385 385
386 386 upgrademessage = _(b'revlog content will be recompressed')
387 387
388 388 @classmethod
389 389 def fromrepo(cls, repo):
390 390 comp = compressionengine.fromrepo(repo)
391 391 level = None
392 392 if comp == b'zlib':
393 393 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
394 394 elif comp == b'zstd':
395 395 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
396 396 if level is None:
397 397 return b'default'
398 398 return bytes(level)
399 399
400 400 @classmethod
401 401 def fromconfig(cls, repo):
402 402 comp = compressionengine.fromconfig(repo)
403 403 level = None
404 404 if comp == b'zlib':
405 405 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
406 406 elif comp == b'zstd':
407 407 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
408 408 if level is None:
409 409 return b'default'
410 410 return bytes(level)
411 411
412 412
413 413 def finddeficiencies(repo):
414 414 """returns a list of deficiencies that the repo suffer from"""
415 415 deficiencies = []
416 416
417 417 # We could detect lack of revlogv1 and store here, but they were added
418 418 # in 0.9.2 and we don't support upgrading repos without these
419 419 # requirements, so let's not bother.
420 420
421 421 for fv in allformatvariant:
422 422 if not fv.fromrepo(repo):
423 423 deficiencies.append(fv)
424 424
425 425 return deficiencies
426 426
427 427
428 428 ALL_OPTIMISATIONS = []
429 429
430 430
431 431 def register_optimization(obj):
432 432 ALL_OPTIMISATIONS.append(obj)
433 433 return obj
434 434
435 435
436 436 register_optimization(
437 437 improvement(
438 438 name=b're-delta-parent',
439 439 type=OPTIMISATION,
440 440 description=_(
441 441 b'deltas within internal storage will be recalculated to '
442 442 b'choose an optimal base revision where this was not '
443 443 b'already done; the size of the repository may shrink and '
444 444 b'various operations may become faster; the first time '
445 445 b'this optimization is performed could slow down upgrade '
446 446 b'execution considerably; subsequent invocations should '
447 447 b'not run noticeably slower'
448 448 ),
449 449 upgrademessage=_(
450 450 b'deltas within internal storage will choose a new '
451 451 b'base revision if needed'
452 452 ),
453 453 )
454 454 )
455 455
456 456 register_optimization(
457 457 improvement(
458 458 name=b're-delta-multibase',
459 459 type=OPTIMISATION,
460 460 description=_(
461 461 b'deltas within internal storage will be recalculated '
462 462 b'against multiple base revision and the smallest '
463 463 b'difference will be used; the size of the repository may '
464 464 b'shrink significantly when there are many merges; this '
465 465 b'optimization will slow down execution in proportion to '
466 466 b'the number of merges in the repository and the amount '
467 467 b'of files in the repository; this slow down should not '
468 468 b'be significant unless there are tens of thousands of '
469 469 b'files and thousands of merges'
470 470 ),
471 471 upgrademessage=_(
472 472 b'deltas within internal storage will choose an '
473 473 b'optimal delta by computing deltas against multiple '
474 474 b'parents; may slow down execution time '
475 475 b'significantly'
476 476 ),
477 477 )
478 478 )
479 479
480 480 register_optimization(
481 481 improvement(
482 482 name=b're-delta-all',
483 483 type=OPTIMISATION,
484 484 description=_(
485 485 b'deltas within internal storage will always be '
486 486 b'recalculated without reusing prior deltas; this will '
487 487 b'likely make execution run several times slower; this '
488 488 b'optimization is typically not needed'
489 489 ),
490 490 upgrademessage=_(
491 491 b'deltas within internal storage will be fully '
492 492 b'recomputed; this will likely drastically slow down '
493 493 b'execution time'
494 494 ),
495 495 )
496 496 )
497 497
498 498 register_optimization(
499 499 improvement(
500 500 name=b're-delta-fulladd',
501 501 type=OPTIMISATION,
502 502 description=_(
503 503 b'every revision will be re-added as if it was new '
504 504 b'content. It will go through the full storage '
505 505 b'mechanism giving extensions a chance to process it '
506 506 b'(eg. lfs). This is similar to "re-delta-all" but even '
507 507 b'slower since more logic is involved.'
508 508 ),
509 509 upgrademessage=_(
510 510 b'each revision will be added as new content to the '
511 511 b'internal storage; this will likely drastically slow '
512 512 b'down execution time, but some extensions might need '
513 513 b'it'
514 514 ),
515 515 )
516 516 )
517 517
518 518
519 519 def findoptimizations(repo):
520 520 """Determine optimisation that could be used during upgrade"""
521 521 # These are unconditionally added. There is logic later that figures out
522 522 # which ones to apply.
523 523 return list(ALL_OPTIMISATIONS)
524 524
525 525
526 526 def determineactions(repo, deficiencies, sourcereqs, destreqs):
527 527 """Determine upgrade actions that will be performed.
528 528
529 529 Given a list of improvements as returned by ``finddeficiencies`` and
530 530 ``findoptimizations``, determine the list of upgrade actions that
531 531 will be performed.
532 532
533 533 The role of this function is to filter improvements if needed, apply
534 534 recommended optimizations from the improvements list that make sense,
535 535 etc.
536 536
537 537 Returns a list of action names.
538 538 """
539 539 newactions = []
540 540
541 541 for d in deficiencies:
542 542 name = d._requirement
543 543
544 544 # If the action is a requirement that doesn't show up in the
545 545 # destination requirements, prune the action.
546 546 if name is not None and name not in destreqs:
547 547 continue
548 548
549 549 newactions.append(d)
550 550
551 551 # FUTURE consider adding some optimizations here for certain transitions.
552 552 # e.g. adding generaldelta could schedule parent redeltas.
553 553
554 554 return newactions
555 555
556 556
557 class UpgradeOperation(object):
558 """represent the work to be done during an upgrade"""
559
560 def __init__(self, requirements, actions, revlogs_to_process):
561 self.requirements = requirements
562 self.actions = actions
563 self.revlogs_to_process = revlogs_to_process
564
565
557 566 ### Code checking if a repository can go through the upgrade process at all. #
558 567
559 568
560 569 def requiredsourcerequirements(repo):
561 570 """Obtain requirements required to be present to upgrade a repo.
562 571
563 572 An upgrade will not be allowed if the repository doesn't have the
564 573 requirements returned by this function.
565 574 """
566 575 return {
567 576 # Introduced in Mercurial 0.9.2.
568 577 b'revlogv1',
569 578 # Introduced in Mercurial 0.9.2.
570 579 b'store',
571 580 }
572 581
573 582
574 583 def blocksourcerequirements(repo):
575 584 """Obtain requirements that will prevent an upgrade from occurring.
576 585
577 586 An upgrade cannot be performed if the source repository contains any
578 587 of the requirements in the returned set.
579 588 """
580 589 return {
581 590 # The upgrade code does not yet support these experimental features.
582 591 # This is an artificial limitation.
583 592 requirements.TREEMANIFEST_REQUIREMENT,
584 593 # This was a precursor to generaldelta and was never enabled by default.
585 594 # It should (hopefully) not exist in the wild.
586 595 b'parentdelta',
587 596 # Upgrade should operate on the actual store, not the shared link.
588 597 requirements.SHARED_REQUIREMENT,
589 598 }
590 599
591 600
592 601 def check_source_requirements(repo):
593 602 """Ensure that no existing requirements prevent the repository upgrade"""
594 603
595 604 required = requiredsourcerequirements(repo)
596 605 missingreqs = required - repo.requirements
597 606 if missingreqs:
598 607 msg = _(b'cannot upgrade repository; requirement missing: %s')
599 608 missingreqs = b', '.join(sorted(missingreqs))
600 609 raise error.Abort(msg % missingreqs)
601 610
602 611 blocking = blocksourcerequirements(repo)
603 612 blockingreqs = blocking & repo.requirements
604 613 if blockingreqs:
605 614 m = _(b'cannot upgrade repository; unsupported source requirement: %s')
606 615 blockingreqs = b', '.join(sorted(blockingreqs))
607 616 raise error.Abort(m % blockingreqs)
608 617
609 618
610 619 ### Verify the validity of the planned requirement changes ####################
611 620
612 621
613 622 def supportremovedrequirements(repo):
614 623 """Obtain requirements that can be removed during an upgrade.
615 624
616 625 If an upgrade were to create a repository that dropped a requirement,
617 626 the dropped requirement must appear in the returned set for the upgrade
618 627 to be allowed.
619 628 """
620 629 supported = {
621 630 requirements.SPARSEREVLOG_REQUIREMENT,
622 631 requirements.SIDEDATA_REQUIREMENT,
623 632 requirements.COPIESSDC_REQUIREMENT,
624 633 requirements.NODEMAP_REQUIREMENT,
625 634 requirements.SHARESAFE_REQUIREMENT,
626 635 }
627 636 for name in compression.compengines:
628 637 engine = compression.compengines[name]
629 638 if engine.available() and engine.revlogheader():
630 639 supported.add(b'exp-compression-%s' % name)
631 640 if engine.name() == b'zstd':
632 641 supported.add(b'revlog-compression-zstd')
633 642 return supported
634 643
635 644
636 645 def supporteddestrequirements(repo):
637 646 """Obtain requirements that upgrade supports in the destination.
638 647
639 648 If the result of the upgrade would create requirements not in this set,
640 649 the upgrade is disallowed.
641 650
642 651 Extensions should monkeypatch this to add their custom requirements.
643 652 """
644 653 supported = {
645 654 b'dotencode',
646 655 b'fncache',
647 656 b'generaldelta',
648 657 b'revlogv1',
649 658 b'store',
650 659 requirements.SPARSEREVLOG_REQUIREMENT,
651 660 requirements.SIDEDATA_REQUIREMENT,
652 661 requirements.COPIESSDC_REQUIREMENT,
653 662 requirements.NODEMAP_REQUIREMENT,
654 663 requirements.SHARESAFE_REQUIREMENT,
655 664 }
656 665 for name in compression.compengines:
657 666 engine = compression.compengines[name]
658 667 if engine.available() and engine.revlogheader():
659 668 supported.add(b'exp-compression-%s' % name)
660 669 if engine.name() == b'zstd':
661 670 supported.add(b'revlog-compression-zstd')
662 671 return supported
663 672
664 673
665 674 def allowednewrequirements(repo):
666 675 """Obtain requirements that can be added to a repository during upgrade.
667 676
668 677 This is used to disallow proposed requirements from being added when
669 678 they weren't present before.
670 679
671 680 We use a list of allowed requirement additions instead of a list of known
672 681 bad additions because the whitelist approach is safer and will prevent
673 682 future, unknown requirements from accidentally being added.
674 683 """
675 684 supported = {
676 685 b'dotencode',
677 686 b'fncache',
678 687 b'generaldelta',
679 688 requirements.SPARSEREVLOG_REQUIREMENT,
680 689 requirements.SIDEDATA_REQUIREMENT,
681 690 requirements.COPIESSDC_REQUIREMENT,
682 691 requirements.NODEMAP_REQUIREMENT,
683 692 requirements.SHARESAFE_REQUIREMENT,
684 693 }
685 694 for name in compression.compengines:
686 695 engine = compression.compengines[name]
687 696 if engine.available() and engine.revlogheader():
688 697 supported.add(b'exp-compression-%s' % name)
689 698 if engine.name() == b'zstd':
690 699 supported.add(b'revlog-compression-zstd')
691 700 return supported
692 701
693 702
694 703 def check_requirements_changes(repo, new_reqs):
695 704 old_reqs = repo.requirements
696 705
697 706 support_removal = supportremovedrequirements(repo)
698 707 no_remove_reqs = old_reqs - new_reqs - support_removal
699 708 if no_remove_reqs:
700 709 msg = _(b'cannot upgrade repository; requirement would be removed: %s')
701 710 no_remove_reqs = b', '.join(sorted(no_remove_reqs))
702 711 raise error.Abort(msg % no_remove_reqs)
703 712
704 713 support_addition = allowednewrequirements(repo)
705 714 no_add_reqs = new_reqs - old_reqs - support_addition
706 715 if no_add_reqs:
707 716 m = _(b'cannot upgrade repository; do not support adding requirement: ')
708 717 no_add_reqs = b', '.join(sorted(no_add_reqs))
709 718 raise error.Abort(m + no_add_reqs)
710 719
711 720 supported = supporteddestrequirements(repo)
712 721 unsupported_reqs = new_reqs - supported
713 722 if unsupported_reqs:
714 723 msg = _(
715 724 b'cannot upgrade repository; do not support destination '
716 725 b'requirement: %s'
717 726 )
718 727 unsupported_reqs = b', '.join(sorted(unsupported_reqs))
719 728 raise error.Abort(msg % unsupported_reqs)
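The docstring of supporteddestrequirements above notes that extensions should monkeypatch it to add their custom requirements. A hedged sketch of what such an extension hook could look like, using mercurial.extensions.wrapfunction; the requirement name b'exp-myext-storage' is purely illustrative, and these helpers moved around this series (they now live in mercurial/upgrade_utils/actions.py):

# Sketch of a third-party extension whitelisting its own requirement for upgrades.
from mercurial import extensions
from mercurial.upgrade_utils import actions as upgrade_actions

def _adddestreq(orig, repo):
    reqs = orig(repo)
    reqs.add(b'exp-myext-storage')  # hypothetical requirement
    return reqs

def extsetup(ui):
    extensions.wrapfunction(
        upgrade_actions, 'supporteddestrequirements', _adddestreq
    )
    # if the requirement may also be *added* during an upgrade, it needs to be
    # whitelisted in allowednewrequirements() as well (see above).
    extensions.wrapfunction(
        upgrade_actions, 'allowednewrequirements', _adddestreq
    )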
@@ -1,500 +1,500 @@ mercurial/upgrade_utils/engine.py
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27
28 28
29 29 def _revlogfrompath(repo, path):
30 30 """Obtain a revlog from a repo path.
31 31
32 32 An instance of the appropriate class is returned.
33 33 """
34 34 if path == b'00changelog.i':
35 35 return changelog.changelog(repo.svfs)
36 36 elif path.endswith(b'00manifest.i'):
37 37 mandir = path[: -len(b'00manifest.i')]
38 38 return manifest.manifestrevlog(repo.svfs, tree=mandir)
39 39 else:
40 40 # reverse of "/".join(("data", path + ".i"))
41 41 return filelog.filelog(repo.svfs, path[5:-2])
42 42
43 43
44 44 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
45 45 """copy all relevant files for `oldrl` into `destrepo` store
46 46
47 47 Files are copied "as is" without any transformation. The copy is performed
48 48 without extra checks. Callers are responsible for making sure the copied
49 49 content is compatible with the format of the destination repository.
50 50 """
51 51 oldrl = getattr(oldrl, '_revlog', oldrl)
52 52 newrl = _revlogfrompath(destrepo, unencodedname)
53 53 newrl = getattr(newrl, '_revlog', newrl)
54 54
55 55 oldvfs = oldrl.opener
56 56 newvfs = newrl.opener
57 57 oldindex = oldvfs.join(oldrl.indexfile)
58 58 newindex = newvfs.join(newrl.indexfile)
59 59 olddata = oldvfs.join(oldrl.datafile)
60 60 newdata = newvfs.join(newrl.datafile)
61 61
62 62 with newvfs(newrl.indexfile, b'w'):
63 63 pass # create all the directories
64 64
65 65 util.copyfile(oldindex, newindex)
66 66 copydata = oldrl.opener.exists(oldrl.datafile)
67 67 if copydata:
68 68 util.copyfile(olddata, newdata)
69 69
70 70 if not (
71 71 unencodedname.endswith(b'00changelog.i')
72 72 or unencodedname.endswith(b'00manifest.i')
73 73 ):
74 74 destrepo.svfs.fncache.add(unencodedname)
75 75 if copydata:
76 76 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 77
78 78
79 79 UPGRADE_CHANGELOG = b"changelog"
80 80 UPGRADE_MANIFEST = b"manifest"
81 81 UPGRADE_FILELOGS = b"all-filelogs"
82 82
83 83 UPGRADE_ALL_REVLOGS = frozenset(
84 84 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
85 85 )
86 86
87 87
88 88 def getsidedatacompanion(srcrepo, dstrepo):
89 89 sidedatacompanion = None
90 90 removedreqs = srcrepo.requirements - dstrepo.requirements
91 91 addedreqs = dstrepo.requirements - srcrepo.requirements
92 92 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
93 93
94 94 def sidedatacompanion(rl, rev):
95 95 rl = getattr(rl, '_revlog', rl)
96 96 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
97 97 return True, (), {}, 0, 0
98 98 return False, (), {}, 0, 0
99 99
100 100 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
101 101 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
102 102 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
103 103 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
104 104 return sidedatacompanion
105 105
106 106
107 107 def matchrevlog(revlogfilter, entry):
108 108 """check if a revlog is selected for cloning.
109 109
110 110 In other words, determine whether any updates need to be done on the revlog,
111 111 or whether it can be blindly copied.
112 112
113 113 The store entry is checked against the passed filter"""
114 114 if entry.endswith(b'00changelog.i'):
115 115 return UPGRADE_CHANGELOG in revlogfilter
116 116 elif entry.endswith(b'00manifest.i'):
117 117 return UPGRADE_MANIFEST in revlogfilter
118 118 return UPGRADE_FILELOGS in revlogfilter
119 119
120 120
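A quick illustration of the filter above, using the constants defined earlier in this file:

# matchrevlog({UPGRADE_CHANGELOG}, b'00changelog.i')         -> True
# matchrevlog({UPGRADE_CHANGELOG}, b'meta/x/00manifest.i')   -> False
# matchrevlog(UPGRADE_ALL_REVLOGS, b'data/some/file.txt.i')  -> True (filelog)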
121 121 def _clonerevlogs(
122 122 ui,
123 123 srcrepo,
124 124 dstrepo,
125 125 tr,
126 126 deltareuse,
127 127 forcedeltabothparents,
128 128 revlogs=UPGRADE_ALL_REVLOGS,
129 129 ):
130 130 """Copy revlogs between 2 repos."""
131 131 revcount = 0
132 132 srcsize = 0
133 133 srcrawsize = 0
134 134 dstsize = 0
135 135 fcount = 0
136 136 frevcount = 0
137 137 fsrcsize = 0
138 138 frawsize = 0
139 139 fdstsize = 0
140 140 mcount = 0
141 141 mrevcount = 0
142 142 msrcsize = 0
143 143 mrawsize = 0
144 144 mdstsize = 0
145 145 crevcount = 0
146 146 csrcsize = 0
147 147 crawsize = 0
148 148 cdstsize = 0
149 149
150 150 alldatafiles = list(srcrepo.store.walk())
151 151
152 152 # Perform a pass to collect metadata. This validates we can open all
153 153 # source files and allows a unified progress bar to be displayed.
154 154 for unencoded, encoded, size in alldatafiles:
155 155 if unencoded.endswith(b'.d'):
156 156 continue
157 157
158 158 rl = _revlogfrompath(srcrepo, unencoded)
159 159
160 160 info = rl.storageinfo(
161 161 exclusivefiles=True,
162 162 revisionscount=True,
163 163 trackedsize=True,
164 164 storedsize=True,
165 165 )
166 166
167 167 revcount += info[b'revisionscount'] or 0
168 168 datasize = info[b'storedsize'] or 0
169 169 rawsize = info[b'trackedsize'] or 0
170 170
171 171 srcsize += datasize
172 172 srcrawsize += rawsize
173 173
174 174 # This is for the separate progress bars.
175 175 if isinstance(rl, changelog.changelog):
176 176 crevcount += len(rl)
177 177 csrcsize += datasize
178 178 crawsize += rawsize
179 179 elif isinstance(rl, manifest.manifestrevlog):
180 180 mcount += 1
181 181 mrevcount += len(rl)
182 182 msrcsize += datasize
183 183 mrawsize += rawsize
184 184 elif isinstance(rl, filelog.filelog):
185 185 fcount += 1
186 186 frevcount += len(rl)
187 187 fsrcsize += datasize
188 188 frawsize += rawsize
189 189 else:
190 190 raise error.ProgrammingError(b'unknown revlog type')
191 191
192 192 if not revcount:
193 193 return
194 194
195 195 ui.status(
196 196 _(
197 197 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
198 198 b'%d in changelog)\n'
199 199 )
200 200 % (revcount, frevcount, mrevcount, crevcount)
201 201 )
202 202 ui.status(
203 203 _(b'migrating %s in store; %s tracked data\n')
204 204 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
205 205 )
206 206
207 207 # Used to keep track of progress.
208 208 progress = None
209 209
210 210 def oncopiedrevision(rl, rev, node):
211 211 progress.increment()
212 212
213 213 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
214 214
215 215 # Do the actual copying.
216 216 # FUTURE this operation can be farmed off to worker processes.
217 217 seen = set()
218 218 for unencoded, encoded, size in alldatafiles:
219 219 if unencoded.endswith(b'.d'):
220 220 continue
221 221
222 222 oldrl = _revlogfrompath(srcrepo, unencoded)
223 223
224 224 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
225 225 ui.status(
226 226 _(
227 227 b'finished migrating %d manifest revisions across %d '
228 228 b'manifests; change in size: %s\n'
229 229 )
230 230 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
231 231 )
232 232
233 233 ui.status(
234 234 _(
235 235 b'migrating changelog containing %d revisions '
236 236 b'(%s in store; %s tracked data)\n'
237 237 )
238 238 % (
239 239 crevcount,
240 240 util.bytecount(csrcsize),
241 241 util.bytecount(crawsize),
242 242 )
243 243 )
244 244 seen.add(b'c')
245 245 progress = srcrepo.ui.makeprogress(
246 246 _(b'changelog revisions'), total=crevcount
247 247 )
248 248 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
249 249 ui.status(
250 250 _(
251 251 b'finished migrating %d filelog revisions across %d '
252 252 b'filelogs; change in size: %s\n'
253 253 )
254 254 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
255 255 )
256 256
257 257 ui.status(
258 258 _(
259 259 b'migrating %d manifests containing %d revisions '
260 260 b'(%s in store; %s tracked data)\n'
261 261 )
262 262 % (
263 263 mcount,
264 264 mrevcount,
265 265 util.bytecount(msrcsize),
266 266 util.bytecount(mrawsize),
267 267 )
268 268 )
269 269 seen.add(b'm')
270 270 if progress:
271 271 progress.complete()
272 272 progress = srcrepo.ui.makeprogress(
273 273 _(b'manifest revisions'), total=mrevcount
274 274 )
275 275 elif b'f' not in seen:
276 276 ui.status(
277 277 _(
278 278 b'migrating %d filelogs containing %d revisions '
279 279 b'(%s in store; %s tracked data)\n'
280 280 )
281 281 % (
282 282 fcount,
283 283 frevcount,
284 284 util.bytecount(fsrcsize),
285 285 util.bytecount(frawsize),
286 286 )
287 287 )
288 288 seen.add(b'f')
289 289 if progress:
290 290 progress.complete()
291 291 progress = srcrepo.ui.makeprogress(
292 292 _(b'file revisions'), total=frevcount
293 293 )
294 294
295 295 if matchrevlog(revlogs, unencoded):
296 296 ui.note(
297 297 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
298 298 )
299 299 newrl = _revlogfrompath(dstrepo, unencoded)
300 300 oldrl.clone(
301 301 tr,
302 302 newrl,
303 303 addrevisioncb=oncopiedrevision,
304 304 deltareuse=deltareuse,
305 305 forcedeltabothparents=forcedeltabothparents,
306 306 sidedatacompanion=sidedatacompanion,
307 307 )
308 308 else:
309 309 msg = _(b'blindly copying %s containing %i revisions\n')
310 310 ui.note(msg % (unencoded, len(oldrl)))
311 311 _copyrevlog(tr, dstrepo, oldrl, unencoded)
312 312
313 313 newrl = _revlogfrompath(dstrepo, unencoded)
314 314
315 315 info = newrl.storageinfo(storedsize=True)
316 316 datasize = info[b'storedsize'] or 0
317 317
318 318 dstsize += datasize
319 319
320 320 if isinstance(newrl, changelog.changelog):
321 321 cdstsize += datasize
322 322 elif isinstance(newrl, manifest.manifestrevlog):
323 323 mdstsize += datasize
324 324 else:
325 325 fdstsize += datasize
326 326
327 327 progress.complete()
328 328
329 329 ui.status(
330 330 _(
331 331 b'finished migrating %d changelog revisions; change in size: '
332 332 b'%s\n'
333 333 )
334 334 % (crevcount, util.bytecount(cdstsize - csrcsize))
335 335 )
336 336
337 337 ui.status(
338 338 _(
339 339 b'finished migrating %d total revisions; total change in store '
340 340 b'size: %s\n'
341 341 )
342 342 % (revcount, util.bytecount(dstsize - srcsize))
343 343 )
344 344
345 345
346 346 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
347 347 """Determine whether to copy a store file during upgrade.
348 348
349 349 This function is called when migrating store files from ``srcrepo`` to
350 350 ``dstrepo`` as part of upgrading a repository.
351 351
352 352 Args:
353 353 srcrepo: repo we are copying from
354 354 dstrepo: repo we are copying to
355 355 requirements: set of requirements for ``dstrepo``
356 356 path: store file being examined
357 357 mode: the ``ST_MODE`` file type of ``path``
358 358 st: ``stat`` data structure for ``path``
359 359
360 360 Function should return ``True`` if the file is to be copied.
361 361 """
362 362 # Skip revlogs.
363 363 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
364 364 return False
365 365 # Skip transaction related files.
366 366 if path.startswith(b'undo'):
367 367 return False
368 368 # Only copy regular files.
369 369 if mode != stat.S_IFREG:
370 370 return False
371 371 # Skip other skipped files.
372 372 if path in (b'lock', b'fncache'):
373 373 return False
374 374
375 375 return True
376 376
377 377
378 378 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
379 379 """Hook point for extensions to perform additional actions during upgrade.
380 380
381 381 This function is called after revlogs and store files have been copied but
382 382 before the new store is swapped into the original location.
383 383 """
384 384
385 385
386 def upgrade(
387 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
388 ):
386 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
389 387 """Do the low-level work of upgrading a repository.
390 388
391 389 The upgrade is effectively performed as a copy between a source
392 390 repository and a temporary destination repository.
393 391
394 392 The source repository is unmodified for as long as possible so the
395 393 upgrade can abort at any time without causing loss of service for
396 394 readers and without corrupting the source repository.
397 395 """
398 396 assert srcrepo.currentwlock()
399 397 assert dstrepo.currentwlock()
400 398
401 399 ui.status(
402 400 _(
403 401 b'(it is safe to interrupt this process any time before '
404 402 b'data migration completes)\n'
405 403 )
406 404 )
407 405
408 if b're-delta-all' in actions:
406 if b're-delta-all' in upgrade_op.actions:
409 407 deltareuse = revlog.revlog.DELTAREUSENEVER
410 elif b're-delta-parent' in actions:
408 elif b're-delta-parent' in upgrade_op.actions:
411 409 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
412 elif b're-delta-multibase' in actions:
410 elif b're-delta-multibase' in upgrade_op.actions:
413 411 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
414 elif b're-delta-fulladd' in actions:
412 elif b're-delta-fulladd' in upgrade_op.actions:
415 413 deltareuse = revlog.revlog.DELTAREUSEFULLADD
416 414 else:
417 415 deltareuse = revlog.revlog.DELTAREUSEALWAYS
418 416
419 417 with dstrepo.transaction(b'upgrade') as tr:
420 418 _clonerevlogs(
421 419 ui,
422 420 srcrepo,
423 421 dstrepo,
424 422 tr,
425 423 deltareuse,
426 b're-delta-multibase' in actions,
427 revlogs=revlogs,
424 b're-delta-multibase' in upgrade_op.actions,
425 revlogs=upgrade_op.revlogs_to_process,
428 426 )
429 427
430 428 # Now copy other files in the store directory.
431 429 # The sorted() makes execution deterministic.
432 430 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
433 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
431 if not _filterstorefile(
432 srcrepo, dstrepo, upgrade_op.requirements, p, kind, st
433 ):
434 434 continue
435 435
436 436 srcrepo.ui.status(_(b'copying %s\n') % p)
437 437 src = srcrepo.store.rawvfs.join(p)
438 438 dst = dstrepo.store.rawvfs.join(p)
439 439 util.copyfile(src, dst, copystat=True)
440 440
441 441 _finishdatamigration(ui, srcrepo, dstrepo, upgrade_op.requirements)
442 442
443 443 ui.status(_(b'data fully migrated to temporary repository\n'))
444 444
445 445 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
446 446 backupvfs = vfsmod.vfs(backuppath)
447 447
448 448 # Make a backup of requires file first, as it is the first to be modified.
449 449 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
450 450
451 451 # We install an arbitrary requirement that clients must not support
452 452 # as a mechanism to lock out new clients during the data swap. This is
453 453 # better than allowing a client to continue while the repository is in
454 454 # an inconsistent state.
455 455 ui.status(
456 456 _(
457 457 b'marking source repository as being upgraded; clients will be '
458 458 b'unable to read from repository\n'
459 459 )
460 460 )
461 461 scmutil.writereporequirements(
462 462 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
463 463 )
464 464
465 465 ui.status(_(b'starting in-place swap of repository data\n'))
466 466 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
467 467
468 468 # Now swap in the new store directory. Doing it as a rename should make
469 469 # the operation nearly instantaneous and atomic (at least in well-behaved
470 470 # environments).
471 471 ui.status(_(b'replacing store...\n'))
472 472 tstart = util.timer()
473 473 util.rename(srcrepo.spath, backupvfs.join(b'store'))
474 474 util.rename(dstrepo.spath, srcrepo.spath)
475 475 elapsed = util.timer() - tstart
476 476 ui.status(
477 477 _(
478 478 b'store replacement complete; repository was inconsistent for '
479 479 b'%0.1fs\n'
480 480 )
481 481 % elapsed
482 482 )
483 483
484 484 # We first write the requirements file. Any new requirements will lock
485 485 # out legacy clients.
486 486 ui.status(
487 487 _(
488 488 b'finalizing requirements file and making repository readable '
489 489 b'again\n'
490 490 )
491 491 )
492 scmutil.writereporequirements(srcrepo, requirements)
492 scmutil.writereporequirements(srcrepo, upgrade_op.requirements)
493 493
494 494 # The lock file from the old store won't be removed because nothing has a
495 495 # reference to its new location. So clean it up manually. Alternatively, we
496 496 # could update srcrepo.svfs and other variables to point to the new
497 497 # location. This is simpler.
498 498 backupvfs.unlink(b'store/lock')
499 499
500 500 return backuppath
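To recap how the optimization names carried by upgrade_op.actions drive the clone in upgrade() above, the if/elif chain boils down to the following mapping (a restatement for readability, not code that exists in Mercurial; pick_deltareuse is a hypothetical helper):

# Sketch: which revlog.clone() delta-reuse policy each optimization selects.
from mercurial import revlog

_DELTA_REUSE_BY_ACTION = [
    (b're-delta-all', revlog.revlog.DELTAREUSENEVER),        # recompute every delta
    (b're-delta-parent', revlog.revlog.DELTAREUSESAMEREVS),
    (b're-delta-multibase', revlog.revlog.DELTAREUSESAMEREVS),
    (b're-delta-fulladd', revlog.revlog.DELTAREUSEFULLADD),  # re-add as new content
]

def pick_deltareuse(actions):
    # first matching action wins, mirroring the if/elif order in upgrade()
    for name, policy in _DELTA_REUSE_BY_ACTION:
        if name in actions:
            return policy
    return revlog.revlog.DELTAREUSEALWAYS  # no re-delta action: reuse existing deltas

# Note: b're-delta-multibase' additionally makes _clonerevlogs compute deltas
# against both parents (forcedeltabothparents=True).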