upgrade: improve documentation of matchrevlog()...
Pulkit Goyal
r46286:d1c10d33 default
@@ -1,1433 +1,1436 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 hg,
19 19 localrepo,
20 20 manifest,
21 21 metadata,
22 22 pycompat,
23 23 requirements,
24 24 revlog,
25 25 scmutil,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29
30 30 from .utils import compression
31 31
32 32 # list of requirements that request a clone of all revlogs if added/removed
33 33 RECLONES_REQUIREMENTS = {
34 34 b'generaldelta',
35 35 requirements.SPARSEREVLOG_REQUIREMENT,
36 36 }
37 37
38 38
39 39 def requiredsourcerequirements(repo):
40 40 """Obtain requirements required to be present to upgrade a repo.
41 41
42 42 An upgrade will not be allowed if the repository doesn't have the
43 43 requirements returned by this function.
44 44 """
45 45 return {
46 46 # Introduced in Mercurial 0.9.2.
47 47 b'revlogv1',
48 48 # Introduced in Mercurial 0.9.2.
49 49 b'store',
50 50 }
51 51
52 52
53 53 def blocksourcerequirements(repo):
54 54 """Obtain requirements that will prevent an upgrade from occurring.
55 55
56 56 An upgrade cannot be performed if the source repository contains a
57 57 requirement in the returned set.
58 58 """
59 59 return {
60 60 # The upgrade code does not yet support these experimental features.
61 61 # This is an artificial limitation.
62 62 requirements.TREEMANIFEST_REQUIREMENT,
63 63 # This was a precursor to generaldelta and was never enabled by default.
64 64 # It should (hopefully) not exist in the wild.
65 65 b'parentdelta',
66 66 # Upgrade should operate on the actual store, not the shared link.
67 67 requirements.SHARED_REQUIREMENT,
68 68 }
69 69
70 70
71 71 def supportremovedrequirements(repo):
72 72 """Obtain requirements that can be removed during an upgrade.
73 73
74 74 If an upgrade were to create a repository that dropped a requirement,
75 75 the dropped requirement must appear in the returned set for the upgrade
76 76 to be allowed.
77 77 """
78 78 supported = {
79 79 requirements.SPARSEREVLOG_REQUIREMENT,
80 80 requirements.SIDEDATA_REQUIREMENT,
81 81 requirements.COPIESSDC_REQUIREMENT,
82 82 requirements.NODEMAP_REQUIREMENT,
83 83 }
84 84 for name in compression.compengines:
85 85 engine = compression.compengines[name]
86 86 if engine.available() and engine.revlogheader():
87 87 supported.add(b'exp-compression-%s' % name)
88 88 if engine.name() == b'zstd':
89 89 supported.add(b'revlog-compression-zstd')
90 90 return supported
91 91
92 92
93 93 def supporteddestrequirements(repo):
94 94 """Obtain requirements that upgrade supports in the destination.
95 95
96 96 If the result of the upgrade would create requirements not in this set,
97 97 the upgrade is disallowed.
98 98
99 99 Extensions should monkeypatch this to add their custom requirements.
100 100 """
101 101 supported = {
102 102 b'dotencode',
103 103 b'fncache',
104 104 b'generaldelta',
105 105 b'revlogv1',
106 106 b'store',
107 107 requirements.SPARSEREVLOG_REQUIREMENT,
108 108 requirements.SIDEDATA_REQUIREMENT,
109 109 requirements.COPIESSDC_REQUIREMENT,
110 110 requirements.NODEMAP_REQUIREMENT,
111 111 requirements.SHARESAFE_REQUIREMENT,
112 112 }
113 113 for name in compression.compengines:
114 114 engine = compression.compengines[name]
115 115 if engine.available() and engine.revlogheader():
116 116 supported.add(b'exp-compression-%s' % name)
117 117 if engine.name() == b'zstd':
118 118 supported.add(b'revlog-compression-zstd')
119 119 return supported
120 120
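# Illustrative sketch of the monkeypatching mentioned in the docstring above
# (hypothetical requirement name, not part of this module):
#
#   from mercurial import upgrade
#
#   _orig = upgrade.supporteddestrequirements
#
#   def _supported(repo):
#       reqs = _orig(repo)
#       reqs.add(b'exp-myextension-fancystore')
#       return reqs
#
#   upgrade.supporteddestrequirements = _supported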
121 121
122 122 def allowednewrequirements(repo):
123 123 """Obtain requirements that can be added to a repository during upgrade.
124 124
125 125 This is used to disallow proposed requirements from being added when
126 126 they weren't present before.
127 127
128 128 We use a list of allowed requirement additions instead of a list of known
129 129 bad additions because the whitelist approach is safer and will prevent
130 130 future, unknown requirements from accidentally being added.
131 131 """
132 132 supported = {
133 133 b'dotencode',
134 134 b'fncache',
135 135 b'generaldelta',
136 136 requirements.SPARSEREVLOG_REQUIREMENT,
137 137 requirements.SIDEDATA_REQUIREMENT,
138 138 requirements.COPIESSDC_REQUIREMENT,
139 139 requirements.NODEMAP_REQUIREMENT,
140 140 }
141 141 for name in compression.compengines:
142 142 engine = compression.compengines[name]
143 143 if engine.available() and engine.revlogheader():
144 144 supported.add(b'exp-compression-%s' % name)
145 145 if engine.name() == b'zstd':
146 146 supported.add(b'revlog-compression-zstd')
147 147 return supported
148 148
149 149
150 150 def preservedrequirements(repo):
151 151 return set()
152 152
153 153
154 154 deficiency = b'deficiency'
155 155 optimisation = b'optimization'
156 156
157 157
158 158 class improvement(object):
159 159 """Represents an improvement that can be made as part of an upgrade.
160 160
161 161 The following attributes are defined on each instance:
162 162
163 163 name
164 164 Machine-readable string uniquely identifying this improvement. It
165 165 will be mapped to an action later in the upgrade process.
166 166
167 167 type
168 168 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
169 169 problem. An optimization is an action (sometimes optional) that
170 170 can be taken to further improve the state of the repository.
171 171
172 172 description
173 173 Message intended for humans explaining the improvement in more detail,
174 174 including the implications of it. For ``deficiency`` types, should be
175 175 worded in the present tense. For ``optimisation`` types, should be
176 176 worded in the future tense.
177 177
178 178 upgrademessage
179 179 Message intended for humans explaining what an upgrade addressing this
180 180 issue will do. Should be worded in the future tense.
181 181 """
182 182
183 183 def __init__(self, name, type, description, upgrademessage):
184 184 self.name = name
185 185 self.type = type
186 186 self.description = description
187 187 self.upgrademessage = upgrademessage
188 188
189 189 def __eq__(self, other):
190 190 if not isinstance(other, improvement):
191 191 # This is what python tells us to do
192 192 return NotImplemented
193 193 return self.name == other.name
194 194
195 195 def __ne__(self, other):
196 196 return not (self == other)
197 197
198 198 def __hash__(self):
199 199 return hash(self.name)
200 200
201 201
202 202 allformatvariant = []
203 203
204 204
205 205 def registerformatvariant(cls):
206 206 allformatvariant.append(cls)
207 207 return cls
208 208
209 209
210 210 class formatvariant(improvement):
211 211 """an improvement subclass dedicated to repository format"""
212 212
213 213 type = deficiency
214 214 ### The following attributes should be defined for each class:
215 215
216 216 # machine-readable string uniquely identifying this improvement. it will be
217 217 # mapped to an action later in the upgrade process.
218 218 name = None
219 219
220 220 # message intended for humans explaining the improvement in more detail,
221 221 # including the implications of it. For ``deficiency`` types, should be worded
222 222 # in the present tense.
223 223 description = None
224 224
225 225 # message intended for humans explaining what an upgrade addressing this
226 226 # issue will do. should be worded in the future tense.
227 227 upgrademessage = None
228 228
229 229 # value of current Mercurial default for new repository
230 230 default = None
231 231
232 232 def __init__(self):
233 233 raise NotImplementedError()
234 234
235 235 @staticmethod
236 236 def fromrepo(repo):
237 237 """current value of the variant in the repository"""
238 238 raise NotImplementedError()
239 239
240 240 @staticmethod
241 241 def fromconfig(repo):
242 242 """current value of the variant in the configuration"""
243 243 raise NotImplementedError()
244 244
245 245
246 246 class requirementformatvariant(formatvariant):
247 247 """formatvariant based on a 'requirement' name.
248 248
249 249 Many format variants are controlled by a 'requirement'. We define a small
250 250 subclass to factor the code.
251 251 """
252 252
253 253 # the requirement that controls this format variant
254 254 _requirement = None
255 255
256 256 @staticmethod
257 257 def _newreporequirements(ui):
258 258 return localrepo.newreporequirements(
259 259 ui, localrepo.defaultcreateopts(ui)
260 260 )
261 261
262 262 @classmethod
263 263 def fromrepo(cls, repo):
264 264 assert cls._requirement is not None
265 265 return cls._requirement in repo.requirements
266 266
267 267 @classmethod
268 268 def fromconfig(cls, repo):
269 269 assert cls._requirement is not None
270 270 return cls._requirement in cls._newreporequirements(repo.ui)
271 271
272 272
273 273 @registerformatvariant
274 274 class fncache(requirementformatvariant):
275 275 name = b'fncache'
276 276
277 277 _requirement = b'fncache'
278 278
279 279 default = True
280 280
281 281 description = _(
282 282 b'long and reserved filenames may not work correctly; '
283 283 b'repository performance is sub-optimal'
284 284 )
285 285
286 286 upgrademessage = _(
287 287 b'repository will be more resilient to storing '
288 288 b'certain paths and performance of certain '
289 289 b'operations should be improved'
290 290 )
291 291
292 292
293 293 @registerformatvariant
294 294 class dotencode(requirementformatvariant):
295 295 name = b'dotencode'
296 296
297 297 _requirement = b'dotencode'
298 298
299 299 default = True
300 300
301 301 description = _(
302 302 b'storage of filenames beginning with a period or '
303 303 b'space may not work correctly'
304 304 )
305 305
306 306 upgrademessage = _(
307 307 b'repository will be better able to store files '
308 308 b'beginning with a space or period'
309 309 )
310 310
311 311
312 312 @registerformatvariant
313 313 class generaldelta(requirementformatvariant):
314 314 name = b'generaldelta'
315 315
316 316 _requirement = b'generaldelta'
317 317
318 318 default = True
319 319
320 320 description = _(
321 321 b'deltas within internal storage are unable to '
322 322 b'choose optimal revisions; repository is larger and '
323 323 b'slower than it could be; interaction with other '
324 324 b'repositories may require extra network and CPU '
325 325 b'resources, making "hg push" and "hg pull" slower'
326 326 )
327 327
328 328 upgrademessage = _(
329 329 b'repository storage will be able to create '
330 330 b'optimal deltas; new repository data will be '
331 331 b'smaller and read times should decrease; '
332 332 b'interacting with other repositories using this '
333 333 b'storage model should require less network and '
334 334 b'CPU resources, making "hg push" and "hg pull" '
335 335 b'faster'
336 336 )
337 337
338 338
339 339 @registerformatvariant
340 340 class sparserevlog(requirementformatvariant):
341 341 name = b'sparserevlog'
342 342
343 343 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
344 344
345 345 default = True
346 346
347 347 description = _(
348 348 b'in order to limit disk reading and memory usage on older '
349 349 b'versions, the span of a delta chain from its root to its '
350 350 b'end is limited, regardless of the relevant data in this span. '
351 351 b'This can severely limit the ability of Mercurial to build '
352 352 b'good chains of deltas, resulting in much more storage space '
353 353 b'being taken and limiting the reusability of on-disk deltas '
354 354 b'during exchange.'
355 355 )
356 356
357 357 upgrademessage = _(
358 358 b'Revlog supports delta chains with more unused data '
359 359 b'between payloads. These gaps will be skipped at read '
360 360 b'time. This allows for better delta chains, better '
361 361 b'compression and faster exchange with the server.'
362 362 )
363 363
364 364
365 365 @registerformatvariant
366 366 class sidedata(requirementformatvariant):
367 367 name = b'sidedata'
368 368
369 369 _requirement = requirements.SIDEDATA_REQUIREMENT
370 370
371 371 default = False
372 372
373 373 description = _(
374 374 b'Allows storage of extra data alongside a revision, '
375 375 b'unlocking various caching options.'
376 376 )
377 377
378 378 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
379 379
380 380
381 381 @registerformatvariant
382 382 class persistentnodemap(requirementformatvariant):
383 383 name = b'persistent-nodemap'
384 384
385 385 _requirement = requirements.NODEMAP_REQUIREMENT
386 386
387 387 default = False
388 388
389 389 description = _(
390 390 b'persist the node -> rev mapping on disk to speed up lookups'
391 391 )
392 392
393 393 upgrademessage = _(b'Speed up revision lookup by node id.')
394 394
395 395
396 396 @registerformatvariant
397 397 class copiessdc(requirementformatvariant):
398 398 name = b'copies-sdc'
399 399
400 400 _requirement = requirements.COPIESSDC_REQUIREMENT
401 401
402 402 default = False
403 403
404 404 description = _(b'Stores copies information alongside changesets.')
405 405
406 406 upgrademessage = _(
407 407 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
408 408 )
409 409
410 410
411 411 @registerformatvariant
412 412 class removecldeltachain(formatvariant):
413 413 name = b'plain-cl-delta'
414 414
415 415 default = True
416 416
417 417 description = _(
418 418 b'changelog storage is using deltas instead of '
419 419 b'raw entries; changelog reading and any '
420 420 b'operation relying on changelog data are slower '
421 421 b'than they could be'
422 422 )
423 423
424 424 upgrademessage = _(
425 425 b'changelog storage will be reformatted to '
426 426 b'store raw entries; changelog reading will be '
427 427 b'faster; changelog size may be reduced'
428 428 )
429 429
430 430 @staticmethod
431 431 def fromrepo(repo):
432 432 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
433 433 # changelogs with deltas.
434 434 cl = repo.changelog
435 435 chainbase = cl.chainbase
436 436 return all(rev == chainbase(rev) for rev in cl)
437 437
438 438 @staticmethod
439 439 def fromconfig(repo):
440 440 return True
441 441
442 442
443 443 @registerformatvariant
444 444 class compressionengine(formatvariant):
445 445 name = b'compression'
446 446 default = b'zlib'
447 447
448 448 description = _(
449 449 b'Compression algorithm used to compress data. '
450 450 b'Some engines are faster than others'
451 451 )
452 452
453 453 upgrademessage = _(
454 454 b'revlog content will be recompressed with the new algorithm.'
455 455 )
456 456
457 457 @classmethod
458 458 def fromrepo(cls, repo):
459 459 # we allow multiple compression engine requirements to co-exist because
460 460 # strictly speaking, revlog seems to support mixed compression styles.
461 461 #
462 462 # The compression used for new entries will be "the last one"
463 463 compression = b'zlib'
464 464 for req in repo.requirements:
465 465 prefix = req.startswith
466 466 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
467 467 compression = req.split(b'-', 2)[2]
468 468 return compression
469 469
470 470 @classmethod
471 471 def fromconfig(cls, repo):
472 472 compengines = repo.ui.configlist(b'format', b'revlog-compression')
473 473 # return the first valid value as the selection code would do
474 474 for comp in compengines:
475 475 if comp in util.compengines:
476 476 return comp
477 477
478 478 # no valid compression found, let's display them all for clarity
479 479 return b','.join(compengines)
480 480
481 481
482 482 @registerformatvariant
483 483 class compressionlevel(formatvariant):
484 484 name = b'compression-level'
485 485 default = b'default'
486 486
487 487 description = _(b'compression level')
488 488
489 489 upgrademessage = _(b'revlog content will be recompressed')
490 490
491 491 @classmethod
492 492 def fromrepo(cls, repo):
493 493 comp = compressionengine.fromrepo(repo)
494 494 level = None
495 495 if comp == b'zlib':
496 496 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
497 497 elif comp == b'zstd':
498 498 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
499 499 if level is None:
500 500 return b'default'
501 501 return bytes(level)
502 502
503 503 @classmethod
504 504 def fromconfig(cls, repo):
505 505 comp = compressionengine.fromconfig(repo)
506 506 level = None
507 507 if comp == b'zlib':
508 508 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
509 509 elif comp == b'zstd':
510 510 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
511 511 if level is None:
512 512 return b'default'
513 513 return bytes(level)
514 514
515 515
516 516 def finddeficiencies(repo):
517 517 """returns a list of deficiencies that the repo suffer from"""
518 518 deficiencies = []
519 519
520 520 # We could detect lack of revlogv1 and store here, but they were added
521 521 # in 0.9.2 and we don't support upgrading repos without these
522 522 # requirements, so let's not bother.
523 523
524 524 for fv in allformatvariant:
525 525 if not fv.fromrepo(repo):
526 526 deficiencies.append(fv)
527 527
528 528 return deficiencies
529 529
530 530
531 531 # search without '-' to support older forms on newer clients.
532 532 #
533 533 # We don't enforce backward compatibility for debug commands so this
534 534 # might eventually be dropped. However, having to use two different
535 535 # forms in scripts when comparing results is annoying enough to add
536 536 # backward compatibility for a while.
537 537 legacy_opts_map = {
538 538 b'redeltaparent': b're-delta-parent',
539 539 b'redeltamultibase': b're-delta-multibase',
540 540 b'redeltaall': b're-delta-all',
541 541 b'redeltafulladd': b're-delta-fulladd',
542 542 }
543 543
544 544
545 545 def findoptimizations(repo):
546 546 """Determine optimisation that could be used during upgrade"""
547 547 # These are unconditionally added. There is logic later that figures out
548 548 # which ones to apply.
549 549 optimizations = []
550 550
551 551 optimizations.append(
552 552 improvement(
553 553 name=b're-delta-parent',
554 554 type=optimisation,
555 555 description=_(
556 556 b'deltas within internal storage will be recalculated to '
557 557 b'choose an optimal base revision where this was not '
558 558 b'already done; the size of the repository may shrink and '
559 559 b'various operations may become faster; the first time '
560 560 b'this optimization is performed could slow down upgrade '
561 561 b'execution considerably; subsequent invocations should '
562 562 b'not run noticeably slower'
563 563 ),
564 564 upgrademessage=_(
565 565 b'deltas within internal storage will choose a new '
566 566 b'base revision if needed'
567 567 ),
568 568 )
569 569 )
570 570
571 571 optimizations.append(
572 572 improvement(
573 573 name=b're-delta-multibase',
574 574 type=optimisation,
575 575 description=_(
576 576 b'deltas within internal storage will be recalculated '
577 577 b'against multiple base revisions and the smallest '
578 578 b'difference will be used; the size of the repository may '
579 579 b'shrink significantly when there are many merges; this '
580 580 b'optimization will slow down execution in proportion to '
581 581 b'the number of merges in the repository and the number '
582 582 b'of files in the repository; this slowdown should not '
583 583 b'be significant unless there are tens of thousands of '
584 584 b'files and thousands of merges'
585 585 ),
586 586 upgrademessage=_(
587 587 b'deltas within internal storage will choose an '
588 588 b'optimal delta by computing deltas against multiple '
589 589 b'parents; may slow down execution time '
590 590 b'significantly'
591 591 ),
592 592 )
593 593 )
594 594
595 595 optimizations.append(
596 596 improvement(
597 597 name=b're-delta-all',
598 598 type=optimisation,
599 599 description=_(
600 600 b'deltas within internal storage will always be '
601 601 b'recalculated without reusing prior deltas; this will '
602 602 b'likely make execution run several times slower; this '
603 603 b'optimization is typically not needed'
604 604 ),
605 605 upgrademessage=_(
606 606 b'deltas within internal storage will be fully '
607 607 b'recomputed; this will likely drastically slow down '
608 608 b'execution time'
609 609 ),
610 610 )
611 611 )
612 612
613 613 optimizations.append(
614 614 improvement(
615 615 name=b're-delta-fulladd',
616 616 type=optimisation,
617 617 description=_(
618 618 b'every revision will be re-added as if it was new '
619 619 b'content. It will go through the full storage '
620 620 b'mechanism giving extensions a chance to process it '
621 621 b'(eg. lfs). This is similar to "re-delta-all" but even '
622 622 b'slower since more logic is involved.'
623 623 ),
624 624 upgrademessage=_(
625 625 b'each revision will be added as new content to the '
626 626 b'internal storage; this will likely drastically slow '
627 627 b'down execution time, but some extensions might need '
628 628 b'it'
629 629 ),
630 630 )
631 631 )
632 632
633 633 return optimizations
634 634
635 635
636 636 def determineactions(repo, deficiencies, sourcereqs, destreqs):
637 637 """Determine upgrade actions that will be performed.
638 638
639 639 Given a list of improvements as returned by ``finddeficiencies`` and
640 640 ``findoptimizations``, determine the list of upgrade actions that
641 641 will be performed.
642 642
643 643 The role of this function is to filter improvements if needed, apply
644 644 recommended optimizations from the improvements list that make sense,
645 645 etc.
646 646
647 647 Returns a list of action names.
648 648 """
649 649 newactions = []
650 650
651 651 for d in deficiencies:
652 652 name = d._requirement
653 653
654 654 # If the action is a requirement that doesn't show up in the
655 655 # destination requirements, prune the action.
656 656 if name is not None and name not in destreqs:
657 657 continue
658 658
659 659 newactions.append(d)
660 660
661 661 # FUTURE consider adding some optimizations here for certain transitions.
662 662 # e.g. adding generaldelta could schedule parent redeltas.
663 663
664 664 return newactions
665 665
666 666
667 667 def _revlogfrompath(repo, path):
668 668 """Obtain a revlog from a repo path.
669 669
670 670 An instance of the appropriate class is returned.
671 671 """
672 672 if path == b'00changelog.i':
673 673 return changelog.changelog(repo.svfs)
674 674 elif path.endswith(b'00manifest.i'):
675 675 mandir = path[: -len(b'00manifest.i')]
676 676 return manifest.manifestrevlog(repo.svfs, tree=mandir)
677 677 else:
678 678 # reverse of "/".join(("data", path + ".i"))
679 679 return filelog.filelog(repo.svfs, path[5:-2])
680 680
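# Illustrative mapping as computed by _revlogfrompath above (hypothetical
# store paths):
#
#   b'00changelog.i'          -> changelog.changelog
#   b'00manifest.i'           -> manifest.manifestrevlog (root tree)
#   b'meta/foo/00manifest.i'  -> manifest.manifestrevlog (tree b'meta/foo/')
#   b'data/foo.txt.i'         -> filelog.filelog for b'foo.txt'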
681 681
682 682 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
683 683 """copy all relevant files for `oldrl` into `destrepo` store
684 684
685 685 Files are copied "as is" without any transformation. The copy is performed
686 686 without extra checks. Callers are responsible for making sure the copied
687 687 content is compatible with format of the destination repository.
688 688 """
689 689 oldrl = getattr(oldrl, '_revlog', oldrl)
690 690 newrl = _revlogfrompath(destrepo, unencodedname)
691 691 newrl = getattr(newrl, '_revlog', newrl)
692 692
693 693 oldvfs = oldrl.opener
694 694 newvfs = newrl.opener
695 695 oldindex = oldvfs.join(oldrl.indexfile)
696 696 newindex = newvfs.join(newrl.indexfile)
697 697 olddata = oldvfs.join(oldrl.datafile)
698 698 newdata = newvfs.join(newrl.datafile)
699 699
700 700 with newvfs(newrl.indexfile, b'w'):
701 701 pass # create all the directories
702 702
703 703 util.copyfile(oldindex, newindex)
704 704 copydata = oldrl.opener.exists(oldrl.datafile)
705 705 if copydata:
706 706 util.copyfile(olddata, newdata)
707 707
708 708 if not (
709 709 unencodedname.endswith(b'00changelog.i')
710 710 or unencodedname.endswith(b'00manifest.i')
711 711 ):
712 712 destrepo.svfs.fncache.add(unencodedname)
713 713 if copydata:
714 714 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
715 715
716 716
717 717 UPGRADE_CHANGELOG = object()
718 718 UPGRADE_MANIFEST = object()
719 719 UPGRADE_FILELOG = object()
720 720
721 721 UPGRADE_ALL_REVLOGS = frozenset(
722 722 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
723 723 )
724 724
725 725
726 726 def getsidedatacompanion(srcrepo, dstrepo):
727 727 sidedatacompanion = None
728 728 removedreqs = srcrepo.requirements - dstrepo.requirements
729 729 addedreqs = dstrepo.requirements - srcrepo.requirements
730 730 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
731 731
732 732 def sidedatacompanion(rl, rev):
733 733 rl = getattr(rl, '_revlog', rl)
734 734 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
735 735 return True, (), {}
736 736 return False, (), {}
737 737
738 738 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
739 739 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
740 740 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
741 741 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
742 742 return sidedatacompanion
743 743
744 744
745 745 def matchrevlog(revlogfilter, entry):
746 """check is a revlog is selected for cloning
746 """check if a revlog is selected for cloning.
747
748 In other words, this tells whether there are any updates which need to be
749 done on the revlog, or whether it can be blindly copied.
747 750
748 751 The store entry is checked against the passed filter"""
749 752 if entry.endswith(b'00changelog.i'):
750 753 return UPGRADE_CHANGELOG in revlogfilter
751 754 elif entry.endswith(b'00manifest.i'):
752 755 return UPGRADE_MANIFEST in revlogfilter
753 756 return UPGRADE_FILELOG in revlogfilter
754 757
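# Illustrative sketch (hypothetical store entries): with a filter that only
# selects the changelog, only the changelog entry is reported as needing a
# clone; everything else can be blindly copied:
#
#   revlogfilter = {UPGRADE_CHANGELOG}
#   matchrevlog(revlogfilter, b'00changelog.i')   # -> True  (clone it)
#   matchrevlog(revlogfilter, b'data/foo.txt.i')  # -> False (blind copy)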
755 758
756 759 def _clonerevlogs(
757 760 ui,
758 761 srcrepo,
759 762 dstrepo,
760 763 tr,
761 764 deltareuse,
762 765 forcedeltabothparents,
763 766 revlogs=UPGRADE_ALL_REVLOGS,
764 767 ):
765 768 """Copy revlogs between 2 repos."""
766 769 revcount = 0
767 770 srcsize = 0
768 771 srcrawsize = 0
769 772 dstsize = 0
770 773 fcount = 0
771 774 frevcount = 0
772 775 fsrcsize = 0
773 776 frawsize = 0
774 777 fdstsize = 0
775 778 mcount = 0
776 779 mrevcount = 0
777 780 msrcsize = 0
778 781 mrawsize = 0
779 782 mdstsize = 0
780 783 crevcount = 0
781 784 csrcsize = 0
782 785 crawsize = 0
783 786 cdstsize = 0
784 787
785 788 alldatafiles = list(srcrepo.store.walk())
786 789
787 790 # Perform a pass to collect metadata. This validates we can open all
788 791 # source files and allows a unified progress bar to be displayed.
789 792 for unencoded, encoded, size in alldatafiles:
790 793 if unencoded.endswith(b'.d'):
791 794 continue
792 795
793 796 rl = _revlogfrompath(srcrepo, unencoded)
794 797
795 798 info = rl.storageinfo(
796 799 exclusivefiles=True,
797 800 revisionscount=True,
798 801 trackedsize=True,
799 802 storedsize=True,
800 803 )
801 804
802 805 revcount += info[b'revisionscount'] or 0
803 806 datasize = info[b'storedsize'] or 0
804 807 rawsize = info[b'trackedsize'] or 0
805 808
806 809 srcsize += datasize
807 810 srcrawsize += rawsize
808 811
809 812 # This is for the separate progress bars.
810 813 if isinstance(rl, changelog.changelog):
811 814 crevcount += len(rl)
812 815 csrcsize += datasize
813 816 crawsize += rawsize
814 817 elif isinstance(rl, manifest.manifestrevlog):
815 818 mcount += 1
816 819 mrevcount += len(rl)
817 820 msrcsize += datasize
818 821 mrawsize += rawsize
819 822 elif isinstance(rl, filelog.filelog):
820 823 fcount += 1
821 824 frevcount += len(rl)
822 825 fsrcsize += datasize
823 826 frawsize += rawsize
824 827 else:
825 828 raise error.ProgrammingError(b'unknown revlog type')
826 829
827 830 if not revcount:
828 831 return
829 832
830 833 ui.status(
831 834 _(
832 835 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
833 836 b'%d in changelog)\n'
834 837 )
835 838 % (revcount, frevcount, mrevcount, crevcount)
836 839 )
837 840 ui.status(
838 841 _(b'migrating %s in store; %s tracked data\n')
839 842 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
840 843 )
841 844
842 845 # Used to keep track of progress.
843 846 progress = None
844 847
845 848 def oncopiedrevision(rl, rev, node):
846 849 progress.increment()
847 850
848 851 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
849 852
850 853 # Do the actual copying.
851 854 # FUTURE this operation can be farmed off to worker processes.
852 855 seen = set()
853 856 for unencoded, encoded, size in alldatafiles:
854 857 if unencoded.endswith(b'.d'):
855 858 continue
856 859
857 860 oldrl = _revlogfrompath(srcrepo, unencoded)
858 861
859 862 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
860 863 ui.status(
861 864 _(
862 865 b'finished migrating %d manifest revisions across %d '
863 866 b'manifests; change in size: %s\n'
864 867 )
865 868 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
866 869 )
867 870
868 871 ui.status(
869 872 _(
870 873 b'migrating changelog containing %d revisions '
871 874 b'(%s in store; %s tracked data)\n'
872 875 )
873 876 % (
874 877 crevcount,
875 878 util.bytecount(csrcsize),
876 879 util.bytecount(crawsize),
877 880 )
878 881 )
879 882 seen.add(b'c')
880 883 progress = srcrepo.ui.makeprogress(
881 884 _(b'changelog revisions'), total=crevcount
882 885 )
883 886 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
884 887 ui.status(
885 888 _(
886 889 b'finished migrating %d filelog revisions across %d '
887 890 b'filelogs; change in size: %s\n'
888 891 )
889 892 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
890 893 )
891 894
892 895 ui.status(
893 896 _(
894 897 b'migrating %d manifests containing %d revisions '
895 898 b'(%s in store; %s tracked data)\n'
896 899 )
897 900 % (
898 901 mcount,
899 902 mrevcount,
900 903 util.bytecount(msrcsize),
901 904 util.bytecount(mrawsize),
902 905 )
903 906 )
904 907 seen.add(b'm')
905 908 if progress:
906 909 progress.complete()
907 910 progress = srcrepo.ui.makeprogress(
908 911 _(b'manifest revisions'), total=mrevcount
909 912 )
910 913 elif b'f' not in seen:
911 914 ui.status(
912 915 _(
913 916 b'migrating %d filelogs containing %d revisions '
914 917 b'(%s in store; %s tracked data)\n'
915 918 )
916 919 % (
917 920 fcount,
918 921 frevcount,
919 922 util.bytecount(fsrcsize),
920 923 util.bytecount(frawsize),
921 924 )
922 925 )
923 926 seen.add(b'f')
924 927 if progress:
925 928 progress.complete()
926 929 progress = srcrepo.ui.makeprogress(
927 930 _(b'file revisions'), total=frevcount
928 931 )
929 932
930 933 if matchrevlog(revlogs, unencoded):
931 934 ui.note(
932 935 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
933 936 )
934 937 newrl = _revlogfrompath(dstrepo, unencoded)
935 938 oldrl.clone(
936 939 tr,
937 940 newrl,
938 941 addrevisioncb=oncopiedrevision,
939 942 deltareuse=deltareuse,
940 943 forcedeltabothparents=forcedeltabothparents,
941 944 sidedatacompanion=sidedatacompanion,
942 945 )
943 946 else:
944 947 msg = _(b'blindly copying %s containing %i revisions\n')
945 948 ui.note(msg % (unencoded, len(oldrl)))
946 949 _copyrevlog(tr, dstrepo, oldrl, unencoded)
947 950
948 951 newrl = _revlogfrompath(dstrepo, unencoded)
949 952
950 953 info = newrl.storageinfo(storedsize=True)
951 954 datasize = info[b'storedsize'] or 0
952 955
953 956 dstsize += datasize
954 957
955 958 if isinstance(newrl, changelog.changelog):
956 959 cdstsize += datasize
957 960 elif isinstance(newrl, manifest.manifestrevlog):
958 961 mdstsize += datasize
959 962 else:
960 963 fdstsize += datasize
961 964
962 965 progress.complete()
963 966
964 967 ui.status(
965 968 _(
966 969 b'finished migrating %d changelog revisions; change in size: '
967 970 b'%s\n'
968 971 )
969 972 % (crevcount, util.bytecount(cdstsize - csrcsize))
970 973 )
971 974
972 975 ui.status(
973 976 _(
974 977 b'finished migrating %d total revisions; total change in store '
975 978 b'size: %s\n'
976 979 )
977 980 % (revcount, util.bytecount(dstsize - srcsize))
978 981 )
979 982
980 983
981 984 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
982 985 """Determine whether to copy a store file during upgrade.
983 986
984 987 This function is called when migrating store files from ``srcrepo`` to
985 988 ``dstrepo`` as part of upgrading a repository.
986 989
987 990 Args:
988 991 srcrepo: repo we are copying from
989 992 dstrepo: repo we are copying to
990 993 requirements: set of requirements for ``dstrepo``
991 994 path: store file being examined
992 995 mode: the ``ST_MODE`` file type of ``path``
993 996 st: ``stat`` data structure for ``path``
994 997
995 998 Function should return ``True`` if the file is to be copied.
996 999 """
997 1000 # Skip revlogs.
998 1001 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
999 1002 return False
1000 1003 # Skip transaction related files.
1001 1004 if path.startswith(b'undo'):
1002 1005 return False
1003 1006 # Only copy regular files.
1004 1007 if mode != stat.S_IFREG:
1005 1008 return False
1006 1009 # Skip other skipped files.
1007 1010 if path in (b'lock', b'fncache'):
1008 1011 return False
1009 1012
1010 1013 return True
1011 1014
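# Illustrative outcomes of the filter above (hypothetical paths; ``st`` is
# whatever stat data the caller collected):
#
#   _filterstorefile(src, dst, reqs, b'data/foo.i', stat.S_IFREG, st)
#       -> False  (revlog, handled by the revlog clone)
#   _filterstorefile(src, dst, reqs, b'undo.backupfiles', stat.S_IFREG, st)
#       -> False  (transaction related)
#   _filterstorefile(src, dst, reqs, b'phaseroots', stat.S_IFREG, st)
#       -> True   (regular store file, copied as-is)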
1012 1015
1013 1016 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1014 1017 """Hook point for extensions to perform additional actions during upgrade.
1015 1018
1016 1019 This function is called after revlogs and store files have been copied but
1017 1020 before the new store is swapped into the original location.
1018 1021 """
1019 1022
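# Illustrative sketch of how an extension might hook this function
# (hypothetical wrapper, not part of this module):
#
#   from mercurial import extensions, upgrade
#
#   def _finishwrapper(orig, ui, srcrepo, dstrepo, requirements):
#       orig(ui, srcrepo, dstrepo, requirements)
#       ui.status(b'copying extension specific data\n')
#
#   extensions.wrapfunction(upgrade, '_finishdatamigration', _finishwrapper)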
1020 1023
1021 1024 def _upgraderepo(
1022 1025 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1023 1026 ):
1024 1027 """Do the low-level work of upgrading a repository.
1025 1028
1026 1029 The upgrade is effectively performed as a copy between a source
1027 1030 repository and a temporary destination repository.
1028 1031
1029 1032 The source repository is unmodified for as long as possible so the
1030 1033 upgrade can abort at any time without causing loss of service for
1031 1034 readers and without corrupting the source repository.
1032 1035 """
1033 1036 assert srcrepo.currentwlock()
1034 1037 assert dstrepo.currentwlock()
1035 1038
1036 1039 ui.status(
1037 1040 _(
1038 1041 b'(it is safe to interrupt this process any time before '
1039 1042 b'data migration completes)\n'
1040 1043 )
1041 1044 )
1042 1045
1043 1046 if b're-delta-all' in actions:
1044 1047 deltareuse = revlog.revlog.DELTAREUSENEVER
1045 1048 elif b're-delta-parent' in actions:
1046 1049 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1047 1050 elif b're-delta-multibase' in actions:
1048 1051 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1049 1052 elif b're-delta-fulladd' in actions:
1050 1053 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1051 1054 else:
1052 1055 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1053 1056
1054 1057 with dstrepo.transaction(b'upgrade') as tr:
1055 1058 _clonerevlogs(
1056 1059 ui,
1057 1060 srcrepo,
1058 1061 dstrepo,
1059 1062 tr,
1060 1063 deltareuse,
1061 1064 b're-delta-multibase' in actions,
1062 1065 revlogs=revlogs,
1063 1066 )
1064 1067
1065 1068 # Now copy other files in the store directory.
1066 1069 # The sorted() makes execution deterministic.
1067 1070 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1068 1071 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1069 1072 continue
1070 1073
1071 1074 srcrepo.ui.status(_(b'copying %s\n') % p)
1072 1075 src = srcrepo.store.rawvfs.join(p)
1073 1076 dst = dstrepo.store.rawvfs.join(p)
1074 1077 util.copyfile(src, dst, copystat=True)
1075 1078
1076 1079 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1077 1080
1078 1081 ui.status(_(b'data fully migrated to temporary repository\n'))
1079 1082
1080 1083 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1081 1084 backupvfs = vfsmod.vfs(backuppath)
1082 1085
1083 1086 # Make a backup of requires file first, as it is the first to be modified.
1084 1087 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1085 1088
1086 1089 # We install an arbitrary requirement that clients must not support
1087 1090 # as a mechanism to lock out new clients during the data swap. This is
1088 1091 # better than allowing a client to continue while the repository is in
1089 1092 # an inconsistent state.
1090 1093 ui.status(
1091 1094 _(
1092 1095 b'marking source repository as being upgraded; clients will be '
1093 1096 b'unable to read from repository\n'
1094 1097 )
1095 1098 )
1096 1099 scmutil.writereporequirements(
1097 1100 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1098 1101 )
1099 1102
1100 1103 ui.status(_(b'starting in-place swap of repository data\n'))
1101 1104 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1102 1105
1103 1106 # Now swap in the new store directory. Doing it as a rename should make
1104 1107 # the operation nearly instantaneous and atomic (at least in well-behaved
1105 1108 # environments).
1106 1109 ui.status(_(b'replacing store...\n'))
1107 1110 tstart = util.timer()
1108 1111 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1109 1112 util.rename(dstrepo.spath, srcrepo.spath)
1110 1113 elapsed = util.timer() - tstart
1111 1114 ui.status(
1112 1115 _(
1113 1116 b'store replacement complete; repository was inconsistent for '
1114 1117 b'%0.1fs\n'
1115 1118 )
1116 1119 % elapsed
1117 1120 )
1118 1121
1119 1122 # We first write the requirements file. Any new requirements will lock
1120 1123 # out legacy clients.
1121 1124 ui.status(
1122 1125 _(
1123 1126 b'finalizing requirements file and making repository readable '
1124 1127 b'again\n'
1125 1128 )
1126 1129 )
1127 1130 scmutil.writereporequirements(srcrepo, requirements)
1128 1131
1129 1132 # The lock file from the old store won't be removed because nothing has a
1130 1133 # reference to its new location. So clean it up manually. Alternatively, we
1131 1134 # could update srcrepo.svfs and other variables to point to the new
1132 1135 # location. This is simpler.
1133 1136 backupvfs.unlink(b'store/lock')
1134 1137
1135 1138 return backuppath
1136 1139
1137 1140
1138 1141 def upgraderepo(
1139 1142 ui,
1140 1143 repo,
1141 1144 run=False,
1142 1145 optimize=None,
1143 1146 backup=True,
1144 1147 manifest=None,
1145 1148 changelog=None,
1146 1149 ):
1147 1150 """Upgrade a repository in place."""
1148 1151 if optimize is None:
1149 1152 optimize = []
1150 1153 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1151 1154 repo = repo.unfiltered()
1152 1155
1153 1156 revlogs = set(UPGRADE_ALL_REVLOGS)
1154 1157 specentries = ((b'c', changelog), (b'm', manifest))
1155 1158 specified = [(y, x) for (y, x) in specentries if x is not None]
1156 1159 if specified:
1157 1160 # we have some limitation on revlogs to be recloned
1158 1161 if any(x for y, x in specified):
1159 1162 revlogs = set()
1160 1163 for r, enabled in specified:
1161 1164 if enabled:
1162 1165 if r == b'c':
1163 1166 revlogs.add(UPGRADE_CHANGELOG)
1164 1167 elif r == b'm':
1165 1168 revlogs.add(UPGRADE_MANIFEST)
1166 1169 else:
1167 1170 # none are enabled
1168 1171 for r, __ in specified:
1169 1172 if r == b'c':
1170 1173 revlogs.discard(UPGRADE_CHANGELOG)
1171 1174 elif r == b'm':
1172 1175 revlogs.discard(UPGRADE_MANIFEST)
1173 1176
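# Illustrative outcomes of the selection above (``changelog`` / ``manifest``
# are the keyword arguments of upgraderepo):
#
#   changelog=None, manifest=None   -> UPGRADE_ALL_REVLOGS
#   changelog=True                  -> {UPGRADE_CHANGELOG}
#   changelog=True, manifest=True   -> {UPGRADE_CHANGELOG, UPGRADE_MANIFEST}
#   changelog=False                 -> {UPGRADE_MANIFEST, UPGRADE_FILELOG}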
1174 1177 # Ensure the repository can be upgraded.
1175 1178 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1176 1179 if missingreqs:
1177 1180 raise error.Abort(
1178 1181 _(b'cannot upgrade repository; requirement missing: %s')
1179 1182 % _(b', ').join(sorted(missingreqs))
1180 1183 )
1181 1184
1182 1185 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1183 1186 if blockedreqs:
1184 1187 raise error.Abort(
1185 1188 _(
1186 1189 b'cannot upgrade repository; unsupported source '
1187 1190 b'requirement: %s'
1188 1191 )
1189 1192 % _(b', ').join(sorted(blockedreqs))
1190 1193 )
1191 1194
1192 1195 # FUTURE there is potentially a need to control the wanted requirements via
1193 1196 # command arguments or via an extension hook point.
1194 1197 newreqs = localrepo.newreporequirements(
1195 1198 repo.ui, localrepo.defaultcreateopts(repo.ui)
1196 1199 )
1197 1200 newreqs.update(preservedrequirements(repo))
1198 1201
1199 1202 noremovereqs = (
1200 1203 repo.requirements - newreqs - supportremovedrequirements(repo)
1201 1204 )
1202 1205 if noremovereqs:
1203 1206 raise error.Abort(
1204 1207 _(
1205 1208 b'cannot upgrade repository; requirement would be '
1206 1209 b'removed: %s'
1207 1210 )
1208 1211 % _(b', ').join(sorted(noremovereqs))
1209 1212 )
1210 1213
1211 1214 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1212 1215 if noaddreqs:
1213 1216 raise error.Abort(
1214 1217 _(
1215 1218 b'cannot upgrade repository; do not support adding '
1216 1219 b'requirement: %s'
1217 1220 )
1218 1221 % _(b', ').join(sorted(noaddreqs))
1219 1222 )
1220 1223
1221 1224 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1222 1225 if unsupportedreqs:
1223 1226 raise error.Abort(
1224 1227 _(
1225 1228 b'cannot upgrade repository; do not support '
1226 1229 b'destination requirement: %s'
1227 1230 )
1228 1231 % _(b', ').join(sorted(unsupportedreqs))
1229 1232 )
1230 1233
1231 1234 # Find and validate all improvements that can be made.
1232 1235 alloptimizations = findoptimizations(repo)
1233 1236
1234 1237 # Apply and Validate arguments.
1235 1238 optimizations = []
1236 1239 for o in alloptimizations:
1237 1240 if o.name in optimize:
1238 1241 optimizations.append(o)
1239 1242 optimize.discard(o.name)
1240 1243
1241 1244 if optimize: # anything left is unknown
1242 1245 raise error.Abort(
1243 1246 _(b'unknown optimization action requested: %s')
1244 1247 % b', '.join(sorted(optimize)),
1245 1248 hint=_(b'run without arguments to see valid optimizations'),
1246 1249 )
1247 1250
1248 1251 deficiencies = finddeficiencies(repo)
1249 1252 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1250 1253 actions.extend(
1251 1254 o
1252 1255 for o in sorted(optimizations)
1253 1256 # determineactions could have added optimisations
1254 1257 if o not in actions
1255 1258 )
1256 1259
1257 1260 removedreqs = repo.requirements - newreqs
1258 1261 addedreqs = newreqs - repo.requirements
1259 1262
1260 1263 if revlogs != UPGRADE_ALL_REVLOGS:
1261 1264 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1262 1265 if incompatible:
1263 1266 msg = _(
1264 1267 b'ignoring revlogs selection flags, format requirements '
1265 1268 b'change: %s\n'
1266 1269 )
1267 1270 ui.warn(msg % b', '.join(sorted(incompatible)))
1268 1271 revlogs = UPGRADE_ALL_REVLOGS
1269 1272
1270 1273 def write_labeled(l, label):
1271 1274 first = True
1272 1275 for r in sorted(l):
1273 1276 if not first:
1274 1277 ui.write(b', ')
1275 1278 ui.write(r, label=label)
1276 1279 first = False
1277 1280
1278 1281 def printrequirements():
1279 1282 ui.write(_(b'requirements\n'))
1280 1283 ui.write(_(b' preserved: '))
1281 1284 write_labeled(
1282 1285 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1283 1286 )
1284 1287 ui.write((b'\n'))
1285 1288 removed = repo.requirements - newreqs
1286 1289 if repo.requirements - newreqs:
1287 1290 ui.write(_(b' removed: '))
1288 1291 write_labeled(removed, "upgrade-repo.requirement.removed")
1289 1292 ui.write((b'\n'))
1290 1293 added = newreqs - repo.requirements
1291 1294 if added:
1292 1295 ui.write(_(b' added: '))
1293 1296 write_labeled(added, "upgrade-repo.requirement.added")
1294 1297 ui.write((b'\n'))
1295 1298 ui.write(b'\n')
1296 1299
1297 1300 def printoptimisations():
1298 1301 optimisations = [a for a in actions if a.type == optimisation]
1299 1302 optimisations.sort(key=lambda a: a.name)
1300 1303 if optimisations:
1301 1304 ui.write(_(b'optimisations: '))
1302 1305 write_labeled(
1303 1306 [a.name for a in optimisations],
1304 1307 "upgrade-repo.optimisation.performed",
1305 1308 )
1306 1309 ui.write(b'\n\n')
1307 1310
1308 1311 def printupgradeactions():
1309 1312 for a in actions:
1310 1313 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1311 1314
1312 1315 if not run:
1313 1316 fromconfig = []
1314 1317 onlydefault = []
1315 1318
1316 1319 for d in deficiencies:
1317 1320 if d.fromconfig(repo):
1318 1321 fromconfig.append(d)
1319 1322 elif d.default:
1320 1323 onlydefault.append(d)
1321 1324
1322 1325 if fromconfig or onlydefault:
1323 1326
1324 1327 if fromconfig:
1325 1328 ui.status(
1326 1329 _(
1327 1330 b'repository lacks features recommended by '
1328 1331 b'current config options:\n\n'
1329 1332 )
1330 1333 )
1331 1334 for i in fromconfig:
1332 1335 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1333 1336
1334 1337 if onlydefault:
1335 1338 ui.status(
1336 1339 _(
1337 1340 b'repository lacks features used by the default '
1338 1341 b'config options:\n\n'
1339 1342 )
1340 1343 )
1341 1344 for i in onlydefault:
1342 1345 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1343 1346
1344 1347 ui.status(b'\n')
1345 1348 else:
1346 1349 ui.status(
1347 1350 _(
1348 1351 b'(no feature deficiencies found in existing '
1349 1352 b'repository)\n'
1350 1353 )
1351 1354 )
1352 1355
1353 1356 ui.status(
1354 1357 _(
1355 1358 b'performing an upgrade with "--run" will make the following '
1356 1359 b'changes:\n\n'
1357 1360 )
1358 1361 )
1359 1362
1360 1363 printrequirements()
1361 1364 printoptimisations()
1362 1365 printupgradeactions()
1363 1366
1364 1367 unusedoptimize = [i for i in alloptimizations if i not in actions]
1365 1368
1366 1369 if unusedoptimize:
1367 1370 ui.status(
1368 1371 _(
1369 1372 b'additional optimizations are available by specifying '
1370 1373 b'"--optimize <name>":\n\n'
1371 1374 )
1372 1375 )
1373 1376 for i in unusedoptimize:
1374 1377 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1375 1378 return
1376 1379
1377 1380 # Else we're in the run=true case.
1378 1381 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1379 1382 printrequirements()
1380 1383 printoptimisations()
1381 1384 printupgradeactions()
1382 1385
1383 1386 upgradeactions = [a.name for a in actions]
1384 1387
1385 1388 ui.status(_(b'beginning upgrade...\n'))
1386 1389 with repo.wlock(), repo.lock():
1387 1390 ui.status(_(b'repository locked and read-only\n'))
1388 1391 # Our strategy for upgrading the repository is to create a new,
1389 1392 # temporary repository, write data to it, then do a swap of the
1390 1393 # data. There are less heavyweight ways to do this, but it is easier
1391 1394 # to create a new repo object than to instantiate all the components
1392 1395 # (like the store) separately.
1393 1396 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1394 1397 backuppath = None
1395 1398 try:
1396 1399 ui.status(
1397 1400 _(
1398 1401 b'creating temporary repository to stage migrated '
1399 1402 b'data: %s\n'
1400 1403 )
1401 1404 % tmppath
1402 1405 )
1403 1406
1404 1407 # clone ui without using ui.copy because repo.ui is protected
1405 1408 repoui = repo.ui.__class__(repo.ui)
1406 1409 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1407 1410
1408 1411 with dstrepo.wlock(), dstrepo.lock():
1409 1412 backuppath = _upgraderepo(
1410 1413 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1411 1414 )
1412 1415 if not (backup or backuppath is None):
1413 1416 ui.status(
1414 1417 _(b'removing old repository content%s\n') % backuppath
1415 1418 )
1416 1419 repo.vfs.rmtree(backuppath, forcibly=True)
1417 1420 backuppath = None
1418 1421
1419 1422 finally:
1420 1423 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1421 1424 repo.vfs.rmtree(tmppath, forcibly=True)
1422 1425
1423 1426 if backuppath and not ui.quiet:
1424 1427 ui.warn(
1425 1428 _(b'copy of old repository backed up at %s\n') % backuppath
1426 1429 )
1427 1430 ui.warn(
1428 1431 _(
1429 1432 b'the old repository will not be deleted; remove '
1430 1433 b'it to free up disk space once the upgraded '
1431 1434 b'repository is verified\n'
1432 1435 )
1433 1436 )