##// END OF EJS Templates
upgrade: move optimisation to something more declarative...
marmoute -
r46610:32dcd783 default
parent child Browse files
Show More
@@ -1,1433 +1,1439 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 hg,
19 19 localrepo,
20 20 manifest,
21 21 metadata,
22 22 pycompat,
23 23 requirements,
24 24 revlog,
25 25 scmutil,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29
30 30 from .utils import compression
31 31
# list of requirements that request a clone of all revlog if added/removed;
# changing these invalidates existing deltas, so a blind copy is not enough
RECLONES_REQUIREMENTS = {
    b'generaldelta',
    requirements.SPARSEREVLOG_REQUIREMENT,
}
37 37
38 38
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2; repositories
    # lacking them are too old for the in-place upgrade machinery.
    return {b'revlogv1', b'store'}
51 51
52 52
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features;
        # this is an artificial limitation.
        requirements.TREEMANIFEST_REQUIREMENT,
        # Precursor to generaldelta that was never enabled by default and
        # should (hopefully) not exist in the wild.
        b'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        requirements.SHARED_REQUIREMENT,
    }
    return blockers
69 69
70 70
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    supported = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # Compression requirements are only removable when the matching engine
    # is usable (available and revlog-capable) on this install.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
91 91
92 92
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        b'revlogv1',
        b'store',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
    }
    # A compression requirement is only supported when the matching engine
    # is usable (available and revlog-capable) on this install.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
120 120
121 121
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    supported = {
        b'dotencode',
        b'fncache',
        b'generaldelta',
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.SIDEDATA_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
    }
    # A compression requirement may only be added when the matching engine
    # is usable (available and revlog-capable) on this install.
    for name in compression.compengines:
        engine = compression.compengines[name]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % name)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')
    return supported
148 148
149 149
def preservedrequirements(repo):
    """Return requirements that must be carried over unchanged.

    Currently no requirement needs explicit preservation.
    """
    return set()
152 152
153 153
# Classification labels for `improvement` instances: a deficiency is an
# outright problem, an optimization an (often optional) enhancement.
# NOTE: the British/American spelling mismatch between the constant name
# and its value is historical.
DEFICIENCY = b'deficiency'
OPTIMISATION = b'optimization'
156 156
157 157
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``DEFICIENCY`` types, should be
       worded in the present tense. For ``OPTIMISATION`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """

    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    # Identity is carried entirely by ``name``: equality and hashing
    # deliberately ignore the other attributes.

    def __eq__(self, other):
        if isinstance(other, improvement):
            return self.name == other.name
        # Not comparable; let Python try the reflected operation.
        return NotImplemented

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)
200 200
201 201
# Registry of every known format variant, populated by the decorator below.
allformatvariant = []


def registerformatvariant(cls):
    """Class decorator adding *cls* to ``allformatvariant`` and returning it."""
    allformatvariant.append(cls)
    return cls
208 208
209 209
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""

    # A missing format variant is always reported as a deficiency.
    type = DEFICIENCY
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``DEFICIENCY`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # Format variants are used as classes, never instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
244 244
245 245
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Requirements a brand-new repository would get with the current
        # configuration.
        return localrepo.newreporequirements(
            ui, localrepo.defaultcreateopts(ui)
        )

    @classmethod
    def fromrepo(cls, repo):
        # True when the repository already carries the requirement.
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        # True when current config would enable the requirement for new repos.
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
271 271
272 272
@registerformatvariant
class fncache(requirementformatvariant):
    # Variant backed by the b'fncache' requirement; on by default.
    name = b'fncache'

    _requirement = b'fncache'

    default = True

    description = _(
        b'long and reserved filenames may not work correctly; '
        b'repository performance is sub-optimal'
    )

    upgrademessage = _(
        b'repository will be more resilient to storing '
        b'certain paths and performance of certain '
        b'operations should be improved'
    )
291 291
292 292
@registerformatvariant
class dotencode(requirementformatvariant):
    # Variant backed by the b'dotencode' requirement; on by default.
    name = b'dotencode'

    _requirement = b'dotencode'

    default = True

    description = _(
        b'storage of filenames beginning with a period or '
        b'space may not work correctly'
    )

    upgrademessage = _(
        b'repository will be better able to store files '
        b'beginning with a space or period'
    )
310 310
311 311
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Variant backed by the b'generaldelta' requirement; on by default.
    name = b'generaldelta'

    _requirement = b'generaldelta'

    default = True

    description = _(
        b'deltas within internal storage are unable to '
        b'choose optimal revisions; repository is larger and '
        b'slower than it could be; interaction with other '
        b'repositories may require extra network and CPU '
        b'resources, making "hg push" and "hg pull" slower'
    )

    upgrademessage = _(
        b'repository storage will be able to create '
        b'optimal deltas; new repository data will be '
        b'smaller and read times should decrease; '
        b'interacting with other repositories using this '
        b'storage model should require less network and '
        b'CPU resources, making "hg push" and "hg pull" '
        b'faster'
    )
337 337
338 338
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Variant backed by the sparse-revlog requirement; on by default.
    name = b'sparserevlog'

    _requirement = requirements.SPARSEREVLOG_REQUIREMENT

    default = True

    # NOTE(review): wording typos ("severly", "resulting is") are part of the
    # translatable msgid; fixing them would invalidate existing translations.
    description = _(
        b'in order to limit disk reading and memory usage on older '
        b'version, the span of a delta chain from its root to its '
        b'end is limited, whatever the relevant data in this span. '
        b'This can severly limit Mercurial ability to build good '
        b'chain of delta resulting is much more storage space being '
        b'taken and limit reusability of on disk delta during '
        b'exchange.'
    )

    upgrademessage = _(
        b'Revlog supports delta chain with more unused data '
        b'between payload. These gaps will be skipped at read '
        b'time. This allows for better delta chains, making a '
        b'better compression and faster exchange with server.'
    )
363 363
364 364
@registerformatvariant
class sidedata(requirementformatvariant):
    # Variant backed by the sidedata requirement; off by default.
    name = b'sidedata'

    _requirement = requirements.SIDEDATA_REQUIREMENT

    default = False

    description = _(
        b'Allows storage of extra data alongside a revision, '
        b'unlocking various caching options.'
    )

    upgrademessage = _(b'Allows storage of extra data alongside a revision.')
379 379
380 380
@registerformatvariant
class persistentnodemap(requirementformatvariant):
    # Variant backed by the persistent nodemap requirement; off by default.
    name = b'persistent-nodemap'

    _requirement = requirements.NODEMAP_REQUIREMENT

    default = False

    description = _(
        b'persist the node -> rev mapping on disk to speedup lookup'
    )

    upgrademessage = _(b'Speedup revision lookup by node id.')
394 394
395 395
@registerformatvariant
class copiessdc(requirementformatvariant):
    # Variant backed by the copies-in-sidedata requirement; off by default.
    name = b'copies-sdc'

    _requirement = requirements.COPIESSDC_REQUIREMENT

    default = False

    description = _(b'Stores copies information alongside changesets.')

    upgrademessage = _(
        b'Allows to use more efficient algorithm to deal with ' b'copy tracing.'
    )
409 409
410 410
@registerformatvariant
class removecldeltachain(formatvariant):
    # Unlike most variants this one is not requirement-backed: it is
    # detected by inspecting the changelog's delta chains directly.
    name = b'plain-cl-delta'

    default = True

    description = _(
        b'changelog storage is using deltas instead of '
        b'raw entries; changelog reading and any '
        b'operation relying on changelog data are slower '
        b'than they could be'
    )

    upgrademessage = _(
        b'changelog storage will be reformated to '
        b'store raw entries; changelog reading will be '
        b'faster; changelog size may be reduced'
    )

    @staticmethod
    def fromrepo(repo):
        # Mercurial 4.0 changed changelogs to not use delta chains. Search for
        # changelogs with deltas.
        cl = repo.changelog
        chainbase = cl.chainbase
        # Every revision being its own chain base means no deltas remain.
        return all(rev == chainbase(rev) for rev in cl)

    @staticmethod
    def fromconfig(repo):
        # New repositories always store the changelog without delta chains.
        return True
441 441
442 442
@registerformatvariant
class compressionengine(formatvariant):
    # Reports which compression engine is in use for revlog storage.
    name = b'compression'
    default = b'zlib'

    description = _(
        b'Compresion algorithm used to compress data. '
        b'Some engine are faster than other'
    )

    upgrademessage = _(
        b'revlog content will be recompressed with the new algorithm.'
    )

    @classmethod
    def fromrepo(cls, repo):
        # Multiple compression engine requirements may co-exist because,
        # strictly speaking, revlog seems to support mixed compression
        # styles. New entries use "the last one" encountered.
        compression = b'zlib'
        prefixes = (b'revlog-compression-', b'exp-compression-')
        for req in repo.requirements:
            if req.startswith(prefixes):
                compression = req.split(b'-', 2)[2]
        return compression

    @classmethod
    def fromconfig(cls, repo):
        compengines = repo.ui.configlist(b'format', b'revlog-compression')
        # Mirror the selection code: the first valid engine wins.
        for comp in compengines:
            if comp in util.compengines:
                return comp

        # No valid compression found; display the whole list for clarity.
        return b','.join(compengines)
480 480
481 481
@registerformatvariant
class compressionlevel(formatvariant):
    # Reports the compression level configured for revlog storage.
    name = b'compression-level'
    default = b'default'

    description = _(b'compression level')

    upgrademessage = _(b'revlog content will be recompressed')

    @classmethod
    def _configuredlevel(cls, repo, comp):
        # Level for engine `comp` from config, or None when unset/unknown.
        if comp == b'zlib':
            return repo.ui.configint(b'storage', b'revlog.zlib.level')
        if comp == b'zstd':
            return repo.ui.configint(b'storage', b'revlog.zstd.level')
        return None

    @classmethod
    def fromrepo(cls, repo):
        """current compression level used by the repository, as bytes"""
        level = cls._configuredlevel(repo, compressionengine.fromrepo(repo))
        if level is None:
            return b'default'
        # Format with %d: bytes(level) on Python 3 would produce a
        # zero-filled buffer of `level` bytes, not the decimal string.
        return b'%d' % level

    @classmethod
    def fromconfig(cls, repo):
        """compression level current config would use, as bytes"""
        level = cls._configuredlevel(repo, compressionengine.fromconfig(repo))
        if level is None:
            return b'default'
        return b'%d' % level
514 514
515 515
def finddeficiencies(repo):
    """returns a list of deficiencies that the repo suffer from"""
    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.
    return [fv for fv in allformatvariant if not fv.fromrepo(repo)]
529 529
530 530
# search without '-' to support older form on newer client.
#
# We don't enforce backward compatibility for debug commands, so this
# might eventually be dropped. However, having to use two different
# forms in scripts when comparing results is annoying enough to keep
# backward compatibility for a while.
legacy_opts_map = {
    b'redeltaparent': b're-delta-parent',
    b'redeltamultibase': b're-delta-multibase',
    b'redeltaall': b're-delta-all',
    b'redeltafulladd': b're-delta-fulladd',
}
543 543
# Registry of optimizations reported by `findoptimizations`, populated
# declaratively via `register_optimization` below.
ALL_OPTIMISATIONS = []


def register_optimization(obj):
    """Add *obj* to the optimization registry and return it unchanged."""
    ALL_OPTIMISATIONS.append(obj)
    return obj
550
551
# Register the built-in optimizations. All of them are reported by
# `findoptimizations`; logic later in the upgrade process decides which
# ones actually apply.

register_optimization(
    improvement(
        name=b're-delta-parent',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated to '
            b'choose an optimal base revision where this was not '
            b'already done; the size of the repository may shrink and '
            b'various operations may become faster; the first time '
            b'this optimization is performed could slow down upgrade '
            b'execution considerably; subsequent invocations should '
            b'not run noticeably slower'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose a new '
            b'base revision if needed'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-multibase',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will be recalculated '
            b'against multiple base revision and the smallest '
            b'difference will be used; the size of the repository may '
            b'shrink significantly when there are many merges; this '
            b'optimization will slow down execution in proportion to '
            b'the number of merges in the repository and the amount '
            b'of files in the repository; this slow down should not '
            b'be significant unless there are tens of thousands of '
            b'files and thousands of merges'
        ),
        upgrademessage=_(
            b'deltas within internal storage will choose an '
            b'optimal delta by computing deltas against multiple '
            b'parents; may slow down execution time '
            b'significantly'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-all',
        type=OPTIMISATION,
        description=_(
            b'deltas within internal storage will always be '
            b'recalculated without reusing prior deltas; this will '
            b'likely make execution run several times slower; this '
            b'optimization is typically not needed'
        ),
        upgrademessage=_(
            b'deltas within internal storage will be fully '
            b'recomputed; this will likely drastically slow down '
            b'execution time'
        ),
    )
)

register_optimization(
    improvement(
        name=b're-delta-fulladd',
        type=OPTIMISATION,
        description=_(
            b'every revision will be re-added as if it was new '
            b'content. It will go through the full storage '
            b'mechanism giving extensions a chance to process it '
            b'(eg. lfs). This is similar to "re-delta-all" but even '
            b'slower since more logic is involved.'
        ),
        upgrademessage=_(
            b'each revision will be added as new content to the '
            b'internal storage; this will likely drastically slow '
            b'down execution time, but some extensions might need '
            b'it'
        ),
    )
)
633
544 634
def findoptimizations(repo):
    """Determine optimisation that could be used during upgrade"""
    # The span as rendered still carries the pre-refactor body (the removed
    # side of the diff) next to the new declarative return; resolve to the
    # post-change implementation. All registered optimizations are returned
    # unconditionally; there is logic later that figures out which ones to
    # apply.
    return list(ALL_OPTIMISATIONS)
634 640
635 641
def determineactions(repo, deficiencies, sourcereqs, destreqs):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``finddeficiencies`` and
    ``findoptimizations``, determine the list of upgrade actions that
    will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.
    return [
        d
        for d in deficiencies
        # A requirement-backed action is pruned when its requirement does
        # not appear in the destination requirements.
        if d._requirement is None or d._requirement in destreqs
    ]
665 671
666 672
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # Everything before the filename is the (sub)manifest directory.
        mandir = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=mandir)
    # reverse of "/".join(("data", path + ".i")): drop "data/" and ".i"
    return filelog.filelog(repo.svfs, path[5:-2])
680 686
681 687
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # Unwrap down to the raw revlog on both sides, when wrapped.
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    # A separate data file only exists when the source revlog has one.
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        # Filelogs (unlike changelog/manifest) must be tracked in fncache.
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
715 721
716 722
# Sentinels identifying which category of revlog an upgrade should process.
UPGRADE_CHANGELOG = object()
UPGRADE_MANIFEST = object()
UPGRADE_FILELOGS = object()

# Convenience filter selecting every revlog category.
UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)
724 730
725 731
def getsidedatacompanion(srcrepo, dstrepo):
    """Build the sidedata companion callback used during the revlog clone.

    Returns None when no sidedata transformation is needed for this
    source/destination requirement combination.
    """
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def sidedatacompanion(rl, rev):
            # Sidedata support is being dropped: request a rewrite (with
            # empty sidedata) for any revision that carries the flag.
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        # Copy metadata moves into sidedata: add it during the clone.
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        # Copy metadata moves out of sidedata: strip it during the clone.
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
743 749
744 750
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
757 763
758 764
759 765 def _clonerevlogs(
760 766 ui,
761 767 srcrepo,
762 768 dstrepo,
763 769 tr,
764 770 deltareuse,
765 771 forcedeltabothparents,
766 772 revlogs=UPGRADE_ALL_REVLOGS,
767 773 ):
768 774 """Copy revlogs between 2 repos."""
769 775 revcount = 0
770 776 srcsize = 0
771 777 srcrawsize = 0
772 778 dstsize = 0
773 779 fcount = 0
774 780 frevcount = 0
775 781 fsrcsize = 0
776 782 frawsize = 0
777 783 fdstsize = 0
778 784 mcount = 0
779 785 mrevcount = 0
780 786 msrcsize = 0
781 787 mrawsize = 0
782 788 mdstsize = 0
783 789 crevcount = 0
784 790 csrcsize = 0
785 791 crawsize = 0
786 792 cdstsize = 0
787 793
788 794 alldatafiles = list(srcrepo.store.walk())
789 795
790 796 # Perform a pass to collect metadata. This validates we can open all
791 797 # source files and allows a unified progress bar to be displayed.
792 798 for unencoded, encoded, size in alldatafiles:
793 799 if unencoded.endswith(b'.d'):
794 800 continue
795 801
796 802 rl = _revlogfrompath(srcrepo, unencoded)
797 803
798 804 info = rl.storageinfo(
799 805 exclusivefiles=True,
800 806 revisionscount=True,
801 807 trackedsize=True,
802 808 storedsize=True,
803 809 )
804 810
805 811 revcount += info[b'revisionscount'] or 0
806 812 datasize = info[b'storedsize'] or 0
807 813 rawsize = info[b'trackedsize'] or 0
808 814
809 815 srcsize += datasize
810 816 srcrawsize += rawsize
811 817
812 818 # This is for the separate progress bars.
813 819 if isinstance(rl, changelog.changelog):
814 820 crevcount += len(rl)
815 821 csrcsize += datasize
816 822 crawsize += rawsize
817 823 elif isinstance(rl, manifest.manifestrevlog):
818 824 mcount += 1
819 825 mrevcount += len(rl)
820 826 msrcsize += datasize
821 827 mrawsize += rawsize
822 828 elif isinstance(rl, filelog.filelog):
823 829 fcount += 1
824 830 frevcount += len(rl)
825 831 fsrcsize += datasize
826 832 frawsize += rawsize
827 833 else:
828 834 error.ProgrammingError(b'unknown revlog type')
829 835
830 836 if not revcount:
831 837 return
832 838
833 839 ui.status(
834 840 _(
835 841 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
836 842 b'%d in changelog)\n'
837 843 )
838 844 % (revcount, frevcount, mrevcount, crevcount)
839 845 )
840 846 ui.status(
841 847 _(b'migrating %s in store; %s tracked data\n')
842 848 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
843 849 )
844 850
845 851 # Used to keep track of progress.
846 852 progress = None
847 853
848 854 def oncopiedrevision(rl, rev, node):
849 855 progress.increment()
850 856
851 857 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
852 858
853 859 # Do the actual copying.
854 860 # FUTURE this operation can be farmed off to worker processes.
855 861 seen = set()
856 862 for unencoded, encoded, size in alldatafiles:
857 863 if unencoded.endswith(b'.d'):
858 864 continue
859 865
860 866 oldrl = _revlogfrompath(srcrepo, unencoded)
861 867
862 868 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
863 869 ui.status(
864 870 _(
865 871 b'finished migrating %d manifest revisions across %d '
866 872 b'manifests; change in size: %s\n'
867 873 )
868 874 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
869 875 )
870 876
871 877 ui.status(
872 878 _(
873 879 b'migrating changelog containing %d revisions '
874 880 b'(%s in store; %s tracked data)\n'
875 881 )
876 882 % (
877 883 crevcount,
878 884 util.bytecount(csrcsize),
879 885 util.bytecount(crawsize),
880 886 )
881 887 )
882 888 seen.add(b'c')
883 889 progress = srcrepo.ui.makeprogress(
884 890 _(b'changelog revisions'), total=crevcount
885 891 )
886 892 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
887 893 ui.status(
888 894 _(
889 895 b'finished migrating %d filelog revisions across %d '
890 896 b'filelogs; change in size: %s\n'
891 897 )
892 898 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
893 899 )
894 900
895 901 ui.status(
896 902 _(
897 903 b'migrating %d manifests containing %d revisions '
898 904 b'(%s in store; %s tracked data)\n'
899 905 )
900 906 % (
901 907 mcount,
902 908 mrevcount,
903 909 util.bytecount(msrcsize),
904 910 util.bytecount(mrawsize),
905 911 )
906 912 )
907 913 seen.add(b'm')
908 914 if progress:
909 915 progress.complete()
910 916 progress = srcrepo.ui.makeprogress(
911 917 _(b'manifest revisions'), total=mrevcount
912 918 )
913 919 elif b'f' not in seen:
914 920 ui.status(
915 921 _(
916 922 b'migrating %d filelogs containing %d revisions '
917 923 b'(%s in store; %s tracked data)\n'
918 924 )
919 925 % (
920 926 fcount,
921 927 frevcount,
922 928 util.bytecount(fsrcsize),
923 929 util.bytecount(frawsize),
924 930 )
925 931 )
926 932 seen.add(b'f')
927 933 if progress:
928 934 progress.complete()
929 935 progress = srcrepo.ui.makeprogress(
930 936 _(b'file revisions'), total=frevcount
931 937 )
932 938
933 939 if matchrevlog(revlogs, unencoded):
934 940 ui.note(
935 941 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
936 942 )
937 943 newrl = _revlogfrompath(dstrepo, unencoded)
938 944 oldrl.clone(
939 945 tr,
940 946 newrl,
941 947 addrevisioncb=oncopiedrevision,
942 948 deltareuse=deltareuse,
943 949 forcedeltabothparents=forcedeltabothparents,
944 950 sidedatacompanion=sidedatacompanion,
945 951 )
946 952 else:
947 953 msg = _(b'blindly copying %s containing %i revisions\n')
948 954 ui.note(msg % (unencoded, len(oldrl)))
949 955 _copyrevlog(tr, dstrepo, oldrl, unencoded)
950 956
951 957 newrl = _revlogfrompath(dstrepo, unencoded)
952 958
953 959 info = newrl.storageinfo(storedsize=True)
954 960 datasize = info[b'storedsize'] or 0
955 961
956 962 dstsize += datasize
957 963
958 964 if isinstance(newrl, changelog.changelog):
959 965 cdstsize += datasize
960 966 elif isinstance(newrl, manifest.manifestrevlog):
961 967 mdstsize += datasize
962 968 else:
963 969 fdstsize += datasize
964 970
965 971 progress.complete()
966 972
967 973 ui.status(
968 974 _(
969 975 b'finished migrating %d changelog revisions; change in size: '
970 976 b'%s\n'
971 977 )
972 978 % (crevcount, util.bytecount(cdstsize - csrcsize))
973 979 )
974 980
975 981 ui.status(
976 982 _(
977 983 b'finished migrating %d total revisions; total change in store '
978 984 b'size: %s\n'
979 985 )
980 986 % (revcount, util.bytecount(dstsize - srcsize))
981 987 )
982 988
983 989
984 990 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
985 991 """Determine whether to copy a store file during upgrade.
986 992
987 993 This function is called when migrating store files from ``srcrepo`` to
988 994 ``dstrepo`` as part of upgrading a repository.
989 995
990 996 Args:
991 997 srcrepo: repo we are copying from
992 998 dstrepo: repo we are copying to
993 999 requirements: set of requirements for ``dstrepo``
994 1000 path: store file being examined
995 1001 mode: the ``ST_MODE`` file type of ``path``
996 1002 st: ``stat`` data structure for ``path``
997 1003
998 1004 Function should return ``True`` if the file is to be copied.
999 1005 """
1000 1006 # Skip revlogs.
1001 1007 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
1002 1008 return False
1003 1009 # Skip transaction related files.
1004 1010 if path.startswith(b'undo'):
1005 1011 return False
1006 1012 # Only copy regular files.
1007 1013 if mode != stat.S_IFREG:
1008 1014 return False
1009 1015 # Skip other skipped files.
1010 1016 if path in (b'lock', b'fncache'):
1011 1017 return False
1012 1018
1013 1019 return True
1014 1020
1015 1021
def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.

    The default implementation is intentionally a no-op; extensions are
    expected to wrap it to migrate any extension-managed data.
    """
1022 1028
1023 1029
def _upgraderepo(
    ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Args:
      ui: ui instance used for feedback
      srcrepo: repository being upgraded (must hold its wlock)
      dstrepo: freshly created staging repository (must hold its wlock)
      requirements: final requirements to write to ``srcrepo``
      actions: names of upgrade actions; the ``re-delta-*`` entries select
        the delta reuse policy for the revlog clone
      revlogs: which revlog groups to actually re-clone

    Returns the path of the backup directory holding the old store.
    """
    # Both locks are a hard precondition: we are about to swap the store.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    # Map the requested action to revlog.clone()'s delta reuse policy.
    # NOTE: re-delta-parent and re-delta-multibase share DELTAREUSESAMEREVS;
    # multibase is distinguished below via forcedeltabothparents.
    if b're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif b're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    # Phase 1: copy revlog data into the staging repo inside a transaction.
    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            b're-delta-multibase' in actions,
            revlogs=revlogs,
        )

    # Now copy other files in the store directory.
    # The sorted() makes execution deterministic.
    for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
            continue

        srcrepo.ui.status(_(b'copying %s\n') % p)
        src = srcrepo.store.rawvfs.join(p)
        dst = dstrepo.store.rawvfs.join(p)
        util.copyfile(src, dst, copystat=True)

    _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    # Phase 2: swap the staged store in, keeping a backup of the old one.
    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath
1139 1145
1140 1146
def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
):
    """Upgrade a repository in place.

    Args:
      ui: ui instance used for all user feedback
      repo: the repository to upgrade (operated on unfiltered)
      run: when False, perform a dry run that only reports what an
        upgrade would do
      optimize: iterable of optimization action names to apply
      backup: when False, remove the backup of the old store once the
        upgrade has completed
      manifest: tri-state (None/True/False) selecting whether manifests
        are re-cloned
      changelog: tri-state (None/True/False) selecting whether the
        changelog is re-cloned

    Raises ``error.Abort`` when the repository cannot be upgraded or an
    unknown optimization was requested.

    NOTE(review): the ``manifest`` and ``changelog`` parameters shadow the
    module-level imports of the same names; the modules are not used inside
    this function, but renaming would change the keyword interface.
    """
    if optimize is None:
        optimize = []
    # Translate legacy option spellings to their modern names.
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    repo = repo.unfiltered()

    # Work out which revlog groups are to be re-cloned from the
    # changelog/manifest tri-state flags.
    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (
        (UPGRADE_CHANGELOG, changelog),
        (UPGRADE_MANIFEST, manifest),
    )
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # we have some limitation on revlogs to be recloned
        if any(x for y, x in specified):
            revlogs = set()
            for upgrade, enabled in specified:
                if enabled:
                    revlogs.add(upgrade)
        else:
            # none are enabled
            for upgrade, __ in specified:
                revlogs.discard(upgrade)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    # A partial revlog selection is only honoured when the requirement
    # changes do not themselves force a full re-clone.
    if revlogs != UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        # Write a sorted, comma-separated list with the given ui label.
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        # Report preserved/removed/added requirements.
        ui.write(_(b'requirements\n'))
        ui.write(_(b' preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write(b'\n')
        removed = repo.requirements - newreqs
        # Reuse the computed set instead of recomputing the difference.
        if removed:
            ui.write(_(b' removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write(b'\n')
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b' added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write(b'\n')
        ui.write(b'\n')

    def printoptimisations():
        # Report the optimisations that will be performed, if any.
        optimisations = [a for a in actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        for a in actions:
            ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies and what --run would change.
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
            if not (backup or backuppath is None):
                ui.status(
                    _(b'removing old repository content%s\n') % backuppath
                )
                repo.vfs.rmtree(backuppath, forcibly=True)
                backuppath = None

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath and not ui.quiet:
                ui.warn(
                    _(b'copy of old repository backed up at %s\n') % backuppath
                )
                ui.warn(
                    _(
                        b'the old repository will not be deleted; remove '
                        b'it to free up disk space once the upgraded '
                        b'repository is verified\n'
                    )
                )
General Comments 0
You need to be logged in to leave comments. Login now