upgrade: extract code in its own module...
Pierre-Yves David
r31864:70d163b8 default
@@ -62,6 +62,7 from . import (
62 62 streamclone,
63 63 templater,
64 64 treediscovery,
65 upgrade,
65 66 util,
66 67 vfs as vfsmod,
67 68 )
@@ -2072,7 +2073,7 def debugupgraderepo(ui, repo, run=False
2072 2073 should complete almost instantaneously and the chances of a consumer being
2073 2074 unable to access the repository should be low.
2074 2075 """
2075 return repair.upgraderepo(ui, repo, run=run, optimize=optimize)
2076 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2076 2077
2077 2078 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2078 2079 inferrepo=True)
@@ -10,23 +10,16 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 import stat
14 import tempfile
15 13
16 14 from .i18n import _
17 15 from .node import short
18 16 from . import (
19 17 bundle2,
20 18 changegroup,
21 changelog,
22 19 error,
23 20 exchange,
24 manifest,
25 21 obsolete,
26 revlog,
27 scmutil,
28 22 util,
29 vfs as vfsmod,
30 23 )
31 24
32 25 def _bundle(repo, bases, heads, node, suffix, compress=True):
@@ -359,738 +352,3 def deleteobsmarkers(obsstore, indices):
359 352 newobsstorefile.write(bytes)
360 353 newobsstorefile.close()
361 354 return n
362
363 def upgraderequiredsourcerequirements(repo):
364 """Obtain requirements required to be present to upgrade a repo.
365
366 An upgrade will not be allowed if the repository doesn't have the
367 requirements returned by this function.
368 """
369 return set([
370 # Introduced in Mercurial 0.9.2.
371 'revlogv1',
372 # Introduced in Mercurial 0.9.2.
373 'store',
374 ])
375
376 def upgradeblocksourcerequirements(repo):
377 """Obtain requirements that will prevent an upgrade from occurring.
378
379 An upgrade cannot be performed if the source repository contains a
380 requirement in the returned set.
381 """
382 return set([
383 # The upgrade code does not yet support these experimental features.
384 # This is an artificial limitation.
385 'manifestv2',
386 'treemanifest',
387 # This was a precursor to generaldelta and was never enabled by default.
388 # It should (hopefully) not exist in the wild.
389 'parentdelta',
390 # Upgrade should operate on the actual store, not the shared link.
391 'shared',
392 ])
393
394 def upgradesupportremovedrequirements(repo):
395 """Obtain requirements that can be removed during an upgrade.
396
397 If an upgrade were to create a repository that dropped a requirement,
398 the dropped requirement must appear in the returned set for the upgrade
399 to be allowed.
400 """
401 return set()
402
403 def upgradesupporteddestrequirements(repo):
404 """Obtain requirements that upgrade supports in the destination.
405
406 If the result of the upgrade would create requirements not in this set,
407 the upgrade is disallowed.
408
409 Extensions should monkeypatch this to add their custom requirements.
410 """
411 return set([
412 'dotencode',
413 'fncache',
414 'generaldelta',
415 'revlogv1',
416 'store',
417 ])
418
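
   Note: the docstring above invites extensions to monkeypatch
   ``upgradesupporteddestrequirements``; after this change the function lives
   in the new ``upgrade`` module. A minimal sketch of how an extension might
   do that (``myextrequirement`` is a hypothetical requirement name, and the
   wrapping style shown is one plausible approach, not one blessed here):

       from mercurial import upgrade

       origfn = upgrade.upgradesupporteddestrequirements

       def supporteddestrequirements(repo):
           # Start from the stock set, then allow our hypothetical requirement.
           reqs = origfn(repo)
           reqs.add('myextrequirement')  # hypothetical
           return reqs

       upgrade.upgradesupporteddestrequirements = supporteddestrequirements
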
419 def upgradeallowednewrequirements(repo):
420 """Obtain requirements that can be added to a repository during upgrade.
421
422 This is used to disallow proposed requirements from being added when
423 they weren't present before.
424
425 We use a list of allowed requirement additions instead of a list of known
426 bad additions because the whitelist approach is safer and will prevent
427 future, unknown requirements from accidentally being added.
428 """
429 return set([
430 'dotencode',
431 'fncache',
432 'generaldelta',
433 ])
434
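
   Note: these four functions feed plain set algebra in ``upgraderepo``
   further down. A self-contained sketch of the checks, using literal sets
   in place of the real repository state (values are illustrative only):

       current = {'revlogv1', 'store', 'fncache'}
       new = {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
       allowedadd = {'dotencode', 'fncache', 'generaldelta'}

       missing = {'revlogv1', 'store'} - current   # abort if non-empty
       noadd = new - current - allowedadd          # abort if non-empty
       print(missing)   # set() -> upgrade allowed to proceed
       print(noadd)     # set() -> all new requirements are whitelisted
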
435 deficiency = 'deficiency'
436 optimisation = 'optimization'
437
438 class upgradeimprovement(object):
439 """Represents an improvement that can be made as part of an upgrade.
440
441 The following attributes are defined on each instance:
442
443 name
444 Machine-readable string uniquely identifying this improvement. It
445 will be mapped to an action later in the upgrade process.
446
447 type
448 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 problem. An optimization is an action (sometimes optional) that
450 can be taken to further improve the state of the repository.
451
452 description
453 Message intended for humans explaining the improvement in more detail,
454 including the implications of it. For ``deficiency`` types, should be
455 worded in the present tense. For ``optimisation`` types, should be
456 worded in the future tense.
457
458 upgrademessage
459 Message intended for humans explaining what an upgrade addressing this
460 issue will do. Should be worded in the future tense.
461
462 fromdefault (``deficiency`` types only)
463 Boolean indicating whether the current (deficient) state deviates
464 from Mercurial's default configuration.
465
466 fromconfig (``deficiency`` types only)
467 Boolean indicating whether the current (deficient) state deviates
468 from the current Mercurial configuration.
469 """
470 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 self.name = name
472 self.type = type
473 self.description = description
474 self.upgrademessage = upgrademessage
475
476 for k, v in kwargs.items():
477 setattr(self, k, v)
478
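
   Note: a minimal sketch of constructing one of these objects; the extra
   keyword arguments become attributes via the ``setattr`` loop above
   (values are illustrative, and the strings are unwrapped here, without
   the ``_()`` i18n markers the real code uses):

       i = upgradeimprovement(
           name='fncache',
           type=deficiency,
           description='long and reserved filenames may not work correctly',
           upgrademessage='repository will be more resilient',
           fromdefault=True,
           fromconfig=True)
       assert i.fromconfig   # kwargs land as plain attributes
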
479 def upgradefindimprovements(repo):
480 """Determine improvements that can be made to the repo during upgrade.
481
482 Returns a list of ``upgradeimprovement`` describing repository deficiencies
483 and optimizations.
484 """
485 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
486 from . import localrepo
487
488 newreporeqs = localrepo.newreporequirements(repo)
489
490 improvements = []
491
492 # We could detect lack of revlogv1 and store here, but they were added
493 # in 0.9.2 and we don't support upgrading repos without these
494 # requirements, so let's not bother.
495
496 if 'fncache' not in repo.requirements:
497 improvements.append(upgradeimprovement(
498 name='fncache',
499 type=deficiency,
500 description=_('long and reserved filenames may not work correctly; '
501 'repository performance is sub-optimal'),
502 upgrademessage=_('repository will be more resilient to storing '
503 'certain paths and performance of certain '
504 'operations should be improved'),
505 fromdefault=True,
506 fromconfig='fncache' in newreporeqs))
507
508 if 'dotencode' not in repo.requirements:
509 improvements.append(upgradeimprovement(
510 name='dotencode',
511 type=deficiency,
512 description=_('storage of filenames beginning with a period or '
513 'space may not work correctly'),
514 upgrademessage=_('repository will be better able to store files '
515 'beginning with a space or period'),
516 fromdefault=True,
517 fromconfig='dotencode' in newreporeqs))
518
519 if 'generaldelta' not in repo.requirements:
520 improvements.append(upgradeimprovement(
521 name='generaldelta',
522 type=deficiency,
523 description=_('deltas within internal storage are unable to '
524 'choose optimal revisions; repository is larger and '
525 'slower than it could be; interaction with other '
526 'repositories may require extra network and CPU '
527 'resources, making "hg push" and "hg pull" slower'),
528 upgrademessage=_('repository storage will be able to create '
529 'optimal deltas; new repository data will be '
530 'smaller and read times should decrease; '
531 'interacting with other repositories using this '
532 'storage model should require less network and '
533 'CPU resources, making "hg push" and "hg pull" '
534 'faster'),
535 fromdefault=True,
536 fromconfig='generaldelta' in newreporeqs))
537
538 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
539 # changelogs with deltas.
540 cl = repo.changelog
541 for rev in cl:
542 chainbase = cl.chainbase(rev)
543 if chainbase != rev:
544 improvements.append(upgradeimprovement(
545 name='removecldeltachain',
546 type=deficiency,
547 description=_('changelog storage is using deltas instead of '
548 'raw entries; changelog reading and any '
549 'operation relying on changelog data are slower '
550 'than they could be'),
551 upgrademessage=_('changelog storage will be reformatted to '
552 'store raw entries; changelog reading will be '
553 'faster; changelog size may be reduced'),
554 fromdefault=True,
555 fromconfig=True))
556 break
557
558 # Now for the optimizations.
559
560 # These are unconditionally added. There is logic later that figures out
561 # which ones to apply.
562
563 improvements.append(upgradeimprovement(
564 name='redeltaparent',
565 type=optimisation,
566 description=_('deltas within internal storage will be recalculated to '
567 'choose an optimal base revision where this was not '
568 'already done; the size of the repository may shrink and '
569 'various operations may become faster; the first time '
570 'this optimization is performed could slow down upgrade '
571 'execution considerably; subsequent invocations should '
572 'not run noticeably slower'),
573 upgrademessage=_('deltas within internal storage will choose a new '
574 'base revision if needed')))
575
576 improvements.append(upgradeimprovement(
577 name='redeltamultibase',
578 type=optimisation,
579 description=_('deltas within internal storage will be recalculated '
580 'against multiple base revisions and the smallest '
581 'difference will be used; the size of the repository may '
582 'shrink significantly when there are many merges; this '
583 'optimization will slow down execution in proportion to '
584 'the number of merges in the repository and the number '
585 'of files in the repository; this slowdown should not '
586 'be significant unless there are tens of thousands of '
587 'files and thousands of merges'),
588 upgrademessage=_('deltas within internal storage will choose an '
589 'optimal delta by computing deltas against multiple '
590 'parents; may slow down execution time '
591 'significantly')))
592
593 improvements.append(upgradeimprovement(
594 name='redeltaall',
595 type=optimisation,
596 description=_('deltas within internal storage will always be '
597 'recalculated without reusing prior deltas; this will '
598 'likely make execution run several times slower; this '
599 'optimization is typically not needed'),
600 upgrademessage=_('deltas within internal storage will be fully '
601 'recomputed; this will likely drastically slow down '
602 'execution time')))
603
604 return improvements
605
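
   Note: a sketch of consuming the returned list, splitting it the same way
   the reporting code further down does (assumes ``repo`` is an unfiltered
   localrepository and that this runs inside this module):

       improvements = upgradefindimprovements(repo)
       deficiencies = [i for i in improvements if i.type == deficiency]
       optimizations = [i for i in improvements if i.type == optimisation]
       for i in deficiencies:
           print('%s: %s' % (i.name, i.description))
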
606 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 optimize):
608 """Determine upgrade actions that will be performed.
609
610 Given a list of improvements as returned by ``upgradefindimprovements``,
611 determine the list of upgrade actions that will be performed.
612
613 The role of this function is to filter improvements if needed, apply
614 recommended optimizations from the improvements list that make sense,
615 etc.
616
617 Returns a list of action names.
618 """
619 newactions = []
620
621 knownreqs = upgradesupporteddestrequirements(repo)
622
623 for i in improvements:
624 name = i.name
625
626 # If the action is a requirement that doesn't show up in the
627 # destination requirements, prune the action.
628 if name in knownreqs and name not in destreqs:
629 continue
630
631 if i.type == deficiency:
632 newactions.append(name)
633
634 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635
636 # FUTURE consider adding some optimizations here for certain transitions.
637 # e.g. adding generaldelta could schedule parent redeltas.
638
639 return newactions
640
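
   Note: a worked example of the pruning rule on toy data (a tiny stand-in
   for ``upgradeimprovement`` so the sketch is self-contained): a deficiency
   whose name is a supported destination requirement is dropped when that
   requirement is absent from ``destreqs``.

       import collections
       Toy = collections.namedtuple('Toy', 'name type')
       improvements = [Toy('fncache', 'deficiency'),
                       Toy('generaldelta', 'deficiency')]
       knownreqs = {'fncache', 'generaldelta'}
       destreqs = {'fncache'}   # generaldelta not wanted in destination

       actions = [i.name for i in improvements
                  if i.type == 'deficiency'
                  and not (i.name in knownreqs and i.name not in destreqs)]
       print(actions)           # ['fncache']
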
641 def _revlogfrompath(repo, path):
642 """Obtain a revlog from a repo path.
643
644 An instance of the appropriate class is returned.
645 """
646 if path == '00changelog.i':
647 return changelog.changelog(repo.svfs)
648 elif path.endswith('00manifest.i'):
649 mandir = path[:-len('00manifest.i')]
650 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 else:
652 # Filelogs don't do anything special with settings. So we can use a
653 # vanilla revlog.
654 return revlog.revlog(repo.svfs, path)
655
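
   Note: the dispatch is driven purely by the store path. A sketch of the
   classification on sample paths, with the revlog construction elided so it
   runs standalone:

       for path in ('00changelog.i', 'meta/sub/00manifest.i', 'data/foo.txt.i'):
           if path == '00changelog.i':
               kind = 'changelog'
           elif path.endswith('00manifest.i'):
               kind = 'manifest rooted at %r' % path[:-len('00manifest.i')]
           else:
               kind = 'plain filelog revlog'
           print('%s -> %s' % (path, kind))
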
656 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 """Copy revlogs between 2 repos."""
658 revcount = 0
659 srcsize = 0
660 srcrawsize = 0
661 dstsize = 0
662 fcount = 0
663 frevcount = 0
664 fsrcsize = 0
665 frawsize = 0
666 fdstsize = 0
667 mcount = 0
668 mrevcount = 0
669 msrcsize = 0
670 mrawsize = 0
671 mdstsize = 0
672 crevcount = 0
673 csrcsize = 0
674 crawsize = 0
675 cdstsize = 0
676
677 # Perform a pass to collect metadata. This validates we can open all
678 # source files and allows a unified progress bar to be displayed.
679 for unencoded, encoded, size in srcrepo.store.walk():
680 if unencoded.endswith('.d'):
681 continue
682
683 rl = _revlogfrompath(srcrepo, unencoded)
684 revcount += len(rl)
685
686 datasize = 0
687 rawsize = 0
688 idx = rl.index
689 for rev in rl:
690 e = idx[rev]
691 datasize += e[1]
692 rawsize += e[2]
693
694 srcsize += datasize
695 srcrawsize += rawsize
696
697 # This is for the separate progress bars.
698 if isinstance(rl, changelog.changelog):
699 crevcount += len(rl)
700 csrcsize += datasize
701 crawsize += rawsize
702 elif isinstance(rl, manifest.manifestrevlog):
703 mcount += 1
704 mrevcount += len(rl)
705 msrcsize += datasize
706 mrawsize += rawsize
707 elif isinstance(rl, revlog.revlog):
708 fcount += 1
709 frevcount += len(rl)
710 fsrcsize += datasize
711 frawsize += rawsize
712
713 if not revcount:
714 return
715
716 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 '%d in changelog)\n') %
718 (revcount, frevcount, mrevcount, crevcount))
719 ui.write(_('migrating %s in store; %s tracked data\n') % (
720 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
721
722 # Used to keep track of progress.
723 progress = []
724 def oncopiedrevision(rl, rev, node):
725 progress[1] += 1
726 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727
728 # Do the actual copying.
729 # FUTURE this operation can be farmed off to worker processes.
730 seen = set()
731 for unencoded, encoded, size in srcrepo.store.walk():
732 if unencoded.endswith('.d'):
733 continue
734
735 oldrl = _revlogfrompath(srcrepo, unencoded)
736 newrl = _revlogfrompath(dstrepo, unencoded)
737
738 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
739 ui.write(_('finished migrating %d manifest revisions across %d '
740 'manifests; change in size: %s\n') %
741 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
742
743 ui.write(_('migrating changelog containing %d revisions '
744 '(%s in store; %s tracked data)\n') %
745 (crevcount, util.bytecount(csrcsize),
746 util.bytecount(crawsize)))
747 seen.add('c')
748 progress[:] = [_('changelog revisions'), 0, crevcount]
749 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
750 ui.write(_('finished migrating %d filelog revisions across %d '
751 'filelogs; change in size: %s\n') %
752 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
753
754 ui.write(_('migrating %d manifests containing %d revisions '
755 '(%s in store; %s tracked data)\n') %
756 (mcount, mrevcount, util.bytecount(msrcsize),
757 util.bytecount(mrawsize)))
758 seen.add('m')
759 progress[:] = [_('manifest revisions'), 0, mrevcount]
760 elif 'f' not in seen:
761 ui.write(_('migrating %d filelogs containing %d revisions '
762 '(%s in store; %s tracked data)\n') %
763 (fcount, frevcount, util.bytecount(fsrcsize),
764 util.bytecount(frawsize)))
765 seen.add('f')
766 progress[:] = [_('file revisions'), 0, frevcount]
767
768 ui.progress(progress[0], progress[1], total=progress[2])
769
770 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
771 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
772 deltareuse=deltareuse,
773 aggressivemergedeltas=aggressivemergedeltas)
774
775 datasize = 0
776 idx = newrl.index
777 for rev in newrl:
778 datasize += idx[rev][1]
779
780 dstsize += datasize
781
782 if isinstance(newrl, changelog.changelog):
783 cdstsize += datasize
784 elif isinstance(newrl, manifest.manifestrevlog):
785 mdstsize += datasize
786 else:
787 fdstsize += datasize
788
789 ui.progress(progress[0], None)
790
791 ui.write(_('finished migrating %d changelog revisions; change in size: '
792 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
793
794 ui.write(_('finished migrating %d total revisions; total change in store '
795 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
796
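
   Note: one detail worth calling out above: ``progress`` is a list mutated
   in place (``progress[:] = [...]``) so the ``oncopiedrevision`` closure
   always sees the current topic and totals without any nonlocal machinery
   (this code predates Python 3's ``nonlocal``). A stripped-down sketch of
   the pattern:

       progress = []                       # [topic, count, total]

       def oncopied():
           progress[1] += 1
           print('%s: %d/%d' % tuple(progress))

       progress[:] = ['changelog revisions', 0, 3]
       for _unused in range(3):
           oncopied()
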
797 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
798 """Determine whether to copy a store file during upgrade.
799
800 This function is called when migrating store files from ``srcrepo`` to
801 ``dstrepo`` as part of upgrading a repository.
802
803 Args:
804 srcrepo: repo we are copying from
805 dstrepo: repo we are copying to
806 requirements: set of requirements for ``dstrepo``
807 path: store file being examined
808 mode: the ``ST_MODE`` file type of ``path``
809 st: ``stat`` data structure for ``path``
810
811 Function should return ``True`` if the file is to be copied.
812 """
813 # Skip revlogs.
814 if path.endswith(('.i', '.d')):
815 return False
816 # Skip transaction related files.
817 if path.startswith('undo'):
818 return False
819 # Only copy regular files.
820 if mode != stat.S_IFREG:
821 return False
822 # Skip remaining files that must not be copied.
823 if path in ('lock', 'fncache'):
824 return False
825
826 return True
827
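
   Note: since the current implementation only looks at ``path`` and
   ``mode``, a sketch can exercise it with placeholder arguments (this leans
   on the fact that the other parameters are presently unused; they exist
   for extensions):

       import stat
       for path in ('00changelog.i', 'undo.backupfiles', 'phaseroots', 'lock'):
           keep = _upgradefilterstorefile(None, None, None, path,
                                          stat.S_IFREG, None)
           print('%s: %s' % (path, keep))
       # only 'phaseroots' survives: revlogs, undo files and the lock are skipped
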
828 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
829 """Hook point for extensions to perform additional actions during upgrade.
830
831 This function is called after revlogs and store files have been copied but
832 before the new store is swapped into the original location.
833 """
834
835 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
836 """Do the low-level work of upgrading a repository.
837
838 The upgrade is effectively performed as a copy between a source
839 repository and a temporary destination repository.
840
841 The source repository is unmodified for as long as possible so the
842 upgrade can abort at any time without causing loss of service for
843 readers and without corrupting the source repository.
844 """
845 assert srcrepo.currentwlock()
846 assert dstrepo.currentwlock()
847
848 ui.write(_('(it is safe to interrupt this process any time before '
849 'data migration completes)\n'))
850
851 if 'redeltaall' in actions:
852 deltareuse = revlog.revlog.DELTAREUSENEVER
853 elif 'redeltaparent' in actions:
854 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
855 elif 'redeltamultibase' in actions:
856 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
857 else:
858 deltareuse = revlog.revlog.DELTAREUSEALWAYS
859
860 with dstrepo.transaction('upgrade') as tr:
861 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
862 'redeltamultibase' in actions)
863
864 # Now copy other files in the store directory.
865 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
866 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
867 p, kind, st):
868 continue
869
870 srcrepo.ui.write(_('copying %s\n') % p)
871 src = srcrepo.store.vfs.join(p)
872 dst = dstrepo.store.vfs.join(p)
873 util.copyfile(src, dst, copystat=True)
874
875 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
876
877 ui.write(_('data fully migrated to temporary repository\n'))
878
879 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
880 backupvfs = vfsmod.vfs(backuppath)
881
882 # Make a backup of requires file first, as it is the first to be modified.
883 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
884
885 # We install an arbitrary requirement that clients must not support
886 # as a mechanism to lock out new clients during the data swap. This is
887 # better than allowing a client to continue while the repository is in
888 # an inconsistent state.
889 ui.write(_('marking source repository as being upgraded; clients will be '
890 'unable to read from repository\n'))
891 scmutil.writerequires(srcrepo.vfs,
892 srcrepo.requirements | set(['upgradeinprogress']))
893
894 ui.write(_('starting in-place swap of repository data\n'))
895 ui.write(_('replaced files will be backed up at %s\n') %
896 backuppath)
897
898 # Now swap in the new store directory. Doing it as a rename should make
899 # the operation nearly instantaneous and atomic (at least in well-behaved
900 # environments).
901 ui.write(_('replacing store...\n'))
902 tstart = util.timer()
903 util.rename(srcrepo.spath, backupvfs.join('store'))
904 util.rename(dstrepo.spath, srcrepo.spath)
905 elapsed = util.timer() - tstart
906 ui.write(_('store replacement complete; repository was inconsistent for '
907 '%0.1fs\n') % elapsed)
908
909 # We first write the requirements file. Any new requirements will lock
910 # out legacy clients.
911 ui.write(_('finalizing requirements file and making repository readable '
912 'again\n'))
913 scmutil.writerequires(srcrepo.vfs, requirements)
914
915 # The lock file from the old store won't be removed because nothing has a
916 # reference to its new location. So clean it up manually. Alternatively, we
917 # could update srcrepo.svfs and other variables to point to the new
918 # location. This is simpler.
919 backupvfs.unlink('store/lock')
920
921 return backuppath
922
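
   Note: the action-to-policy mapping at the top of ``_upgraderepo`` reduces
   to a small decision table. A sketch of it as a standalone function
   returning the constant names as strings (the real code uses the
   ``revlog.revlog`` class attributes):

       def deltareusemode(actions):
           if 'redeltaall' in actions:
               return 'DELTAREUSENEVER'     # recompute every delta from scratch
           elif 'redeltaparent' in actions:
               return 'DELTAREUSESAMEREVS'  # recompute where base was suboptimal
           elif 'redeltamultibase' in actions:
               return 'DELTAREUSESAMEREVS'  # plus aggressivemergedeltas=True
           else:
               return 'DELTAREUSEALWAYS'    # copy existing deltas verbatim

       print(deltareusemode({'redeltamultibase'}))   # DELTAREUSESAMEREVS
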
923 def upgraderepo(ui, repo, run=False, optimize=None):
924 """Upgrade a repository in place."""
925 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
926 from . import localrepo
927
928 optimize = set(optimize or [])
929 repo = repo.unfiltered()
930
931 # Ensure the repository can be upgraded.
932 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
933 if missingreqs:
934 raise error.Abort(_('cannot upgrade repository; requirement '
935 'missing: %s') % _(', ').join(sorted(missingreqs)))
936
937 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
938 if blockedreqs:
939 raise error.Abort(_('cannot upgrade repository; unsupported source '
940 'requirement: %s') %
941 _(', ').join(sorted(blockedreqs)))
942
943 # FUTURE there is potentially a need to control the wanted requirements via
944 # command arguments or via an extension hook point.
945 newreqs = localrepo.newreporequirements(repo)
946
947 noremovereqs = (repo.requirements - newreqs -
948 upgradesupportremovedrequirements(repo))
949 if noremovereqs:
950 raise error.Abort(_('cannot upgrade repository; requirement would be '
951 'removed: %s') % _(', ').join(sorted(noremovereqs)))
952
953 noaddreqs = (newreqs - repo.requirements -
954 upgradeallowednewrequirements(repo))
955 if noaddreqs:
956 raise error.Abort(_('cannot upgrade repository; do not support adding '
957 'requirement: %s') %
958 _(', ').join(sorted(noaddreqs)))
959
960 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
961 if unsupportedreqs:
962 raise error.Abort(_('cannot upgrade repository; do not support '
963 'destination requirement: %s') %
964 _(', ').join(sorted(unsupportedreqs)))
965
966 # Find and validate all improvements that can be made.
967 improvements = upgradefindimprovements(repo)
968 for i in improvements:
969 if i.type not in (deficiency, optimisation):
970 raise error.Abort(_('unexpected improvement type %s for %s') % (
971 i.type, i.name))
972
973 # Validate arguments.
974 unknownoptimize = optimize - set(i.name for i in improvements
975 if i.type == optimisation)
976 if unknownoptimize:
977 raise error.Abort(_('unknown optimization action requested: %s') %
978 ', '.join(sorted(unknownoptimize)),
979 hint=_('run without arguments to see valid '
980 'optimizations'))
981
982 actions = upgradedetermineactions(repo, improvements, repo.requirements,
983 newreqs, optimize)
984
985 def printrequirements():
986 ui.write(_('requirements\n'))
987 ui.write(_(' preserved: %s\n') %
988 _(', ').join(sorted(newreqs & repo.requirements)))
989
990 if repo.requirements - newreqs:
991 ui.write(_(' removed: %s\n') %
992 _(', ').join(sorted(repo.requirements - newreqs)))
993
994 if newreqs - repo.requirements:
995 ui.write(_(' added: %s\n') %
996 _(', ').join(sorted(newreqs - repo.requirements)))
997
998 ui.write('\n')
999
1000 def printupgradeactions():
1001 for action in actions:
1002 for i in improvements:
1003 if i.name == action:
1004 ui.write('%s\n %s\n\n' %
1005 (i.name, i.upgrademessage))
1006
1007 if not run:
1008 fromdefault = []
1009 fromconfig = []
1010 optimizations = []
1011
1012 for i in improvements:
1013 assert i.type in (deficiency, optimisation)
1014 if i.type == deficiency:
1015 if i.fromdefault:
1016 fromdefault.append(i)
1017 if i.fromconfig:
1018 fromconfig.append(i)
1019 else:
1020 optimizations.append(i)
1021
1022 if fromdefault or fromconfig:
1023 fromconfignames = set(x.name for x in fromconfig)
1024 onlydefault = [i for i in fromdefault
1025 if i.name not in fromconfignames]
1026
1027 if fromconfig:
1028 ui.write(_('repository lacks features recommended by '
1029 'current config options:\n\n'))
1030 for i in fromconfig:
1031 ui.write('%s\n %s\n\n' % (i.name, i.description))
1032
1033 if onlydefault:
1034 ui.write(_('repository lacks features used by the default '
1035 'config options:\n\n'))
1036 for i in onlydefault:
1037 ui.write('%s\n %s\n\n' % (i.name, i.description))
1038
1039 ui.write('\n')
1040 else:
1041 ui.write(_('(no feature deficiencies found in existing '
1042 'repository)\n'))
1043
1044 ui.write(_('performing an upgrade with "--run" will make the following '
1045 'changes:\n\n'))
1046
1047 printrequirements()
1048 printupgradeactions()
1049
1050 unusedoptimize = [i for i in improvements
1051 if i.name not in actions and i.type == optimisation]
1052 if unusedoptimize:
1053 ui.write(_('additional optimizations are available by specifying '
1054 '"--optimize <name>":\n\n'))
1055 for i in unusedoptimize:
1056 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1057 return
1058
1059 # Else we're in the run=true case.
1060 ui.write(_('upgrade will perform the following actions:\n\n'))
1061 printrequirements()
1062 printupgradeactions()
1063
1064 ui.write(_('beginning upgrade...\n'))
1065 with repo.wlock():
1066 with repo.lock():
1067 ui.write(_('repository locked and read-only\n'))
1068 # Our strategy for upgrading the repository is to create a new,
1069 # temporary repository, write data to it, then do a swap of the
1070 # data. There are less heavyweight ways to do this, but it is easier
1071 # to create a new repo object than to instantiate all the components
1072 # (like the store) separately.
1073 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1074 backuppath = None
1075 try:
1076 ui.write(_('creating temporary repository to stage migrated '
1077 'data: %s\n') % tmppath)
1078 dstrepo = localrepo.localrepository(repo.baseui,
1079 path=tmppath,
1080 create=True)
1081
1082 with dstrepo.wlock():
1083 with dstrepo.lock():
1084 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1085 actions)
1086
1087 finally:
1088 ui.write(_('removing temporary repository %s\n') % tmppath)
1089 repo.vfs.rmtree(tmppath, forcibly=True)
1090
1091 if backuppath:
1092 ui.warn(_('copy of old repository backed up at %s\n') %
1093 backuppath)
1094 ui.warn(_('the old repository will not be deleted; remove '
1095 'it to free up disk space once the upgraded '
1096 'repository is verified\n'))
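
Note: after this change the entry point lives in the new module;
``debugupgraderepo`` (first hunk above) simply forwards to it. A sketch of
calling it from an extension, assuming ``ui`` and ``repo`` are the usual
Mercurial objects handed to a command function:

    from mercurial import upgrade

    upgrade.upgraderepo(ui, repo, run=False)           # dry run: print plan
    upgrade.upgraderepo(ui, repo, run=True,
                        optimize=['redeltaparent'])    # perform the upgrade
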
@@ -8,358 +8,20
8 8
9 9 from __future__ import absolute_import
10 10
11 import errno
12 import hashlib
13 11 import stat
14 12 import tempfile
15 13
16 14 from .i18n import _
17 from .node import short
18 15 from . import (
19 bundle2,
20 changegroup,
21 16 changelog,
22 17 error,
23 exchange,
24 18 manifest,
25 obsolete,
26 19 revlog,
27 20 scmutil,
28 21 util,
29 22 vfs as vfsmod,
30 23 )
31 24
32 def _bundle(repo, bases, heads, node, suffix, compress=True):
33 """create a bundle with the specified revisions as a backup"""
34 cgversion = changegroup.safeversion(repo)
35
36 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
37 version=cgversion)
38 backupdir = "strip-backup"
39 vfs = repo.vfs
40 if not vfs.isdir(backupdir):
41 vfs.mkdir(backupdir)
42
43 # Include a hash of all the nodes in the filename for uniqueness
44 allcommits = repo.set('%ln::%ln', bases, heads)
45 allhashes = sorted(c.hex() for c in allcommits)
46 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
47 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
48
49 comp = None
50 if cgversion != '01':
51 bundletype = "HG20"
52 if compress:
53 comp = 'BZ'
54 elif compress:
55 bundletype = "HG10BZ"
56 else:
57 bundletype = "HG10UN"
58 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
59 compression=comp)
60
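
   Note: the compression/bundle-type branching above reduces to a small
   decision table. A sketch (names match the string constants used in the
   code):

       def bundlespec(cgversion, compress):
           if cgversion != '01':
               # modern changegroups always ship in a bundle2 container
               return 'HG20', ('BZ' if compress else None)
           return ('HG10BZ' if compress else 'HG10UN'), None

       print(bundlespec('02', True))    # ('HG20', 'BZ')
       print(bundlespec('01', False))   # ('HG10UN', None)
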
61 def _collectfiles(repo, striprev):
62 """find out the filelogs affected by the strip"""
63 files = set()
64
65 for x in xrange(striprev, len(repo)):
66 files.update(repo[x].files())
67
68 return sorted(files)
69
70 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
72 s = set()
73 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
76
77 collectone(repo.manifestlog._revlog)
78 for fname in files:
79 collectone(repo.file(fname))
80
81 return s
82
83 def strip(ui, repo, nodelist, backup=True, topic='backup'):
84 # This function operates within a transaction of its own, but does
85 # not take any lock on the repo.
86 # Simple way to maintain backwards compatibility for this
87 # argument.
88 if backup in ['none', 'strip']:
89 backup = False
90
91 repo = repo.unfiltered()
92 repo.destroying()
93
94 cl = repo.changelog
95 # TODO handle undo of merge sets
96 if isinstance(nodelist, str):
97 nodelist = [nodelist]
98 striplist = [cl.rev(node) for node in nodelist]
99 striprev = min(striplist)
100
101 files = _collectfiles(repo, striprev)
102 saverevs = _collectbrokencsets(repo, files, striprev)
103
104 # Some revisions with rev > striprev may not be descendants of striprev.
105 # We have to find these revisions and put them in a bundle, so that
106 # we can restore them after the truncations.
107 # To create the bundle we use repo.changegroupsubset which requires
108 # the list of heads and bases of the set of interesting revisions.
109 # (head = revision in the set that has no descendant in the set;
110 # base = revision in the set that has no ancestor in the set)
111 tostrip = set(striplist)
112 saveheads = set(saverevs)
113 for r in cl.revs(start=striprev + 1):
114 if any(p in tostrip for p in cl.parentrevs(r)):
115 tostrip.add(r)
116
117 if r not in tostrip:
118 saverevs.add(r)
119 saveheads.difference_update(cl.parentrevs(r))
120 saveheads.add(r)
121 saveheads = [cl.node(r) for r in saveheads]
122
123 # compute base nodes
124 if saverevs:
125 descendants = set(cl.descendants(saverevs))
126 saverevs.difference_update(descendants)
127 savebases = [cl.node(r) for r in saverevs]
128 stripbases = [cl.node(r) for r in tostrip]
129
130 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
131 # is much faster
132 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
133 if newbmtarget:
134 newbmtarget = repo[newbmtarget.first()].node()
135 else:
136 newbmtarget = '.'
137
138 bm = repo._bookmarks
139 updatebm = []
140 for m in bm:
141 rev = repo[bm[m]].rev()
142 if rev in tostrip:
143 updatebm.append(m)
144
145 # create a changegroup for all the branches we need to keep
146 backupfile = None
147 vfs = repo.vfs
148 node = nodelist[-1]
149 if backup:
150 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
151 repo.ui.status(_("saved backup bundle to %s\n") %
152 vfs.join(backupfile))
153 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
154 vfs.join(backupfile))
155 tmpbundlefile = None
156 if saveheads:
157 # do not compress temporary bundle if we remove it from disk later
158 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
159 compress=False)
160
161 mfst = repo.manifestlog._revlog
162
163 curtr = repo.currenttransaction()
164 if curtr is not None:
165 del curtr # avoid carrying reference to transaction for nothing
166 raise error.ProgrammingError('cannot strip from inside a transaction')
167
168 try:
169 with repo.transaction("strip") as tr:
170 offset = len(tr.entries)
171
172 tr.startgroup()
173 cl.strip(striprev, tr)
174 mfst.strip(striprev, tr)
175 if 'treemanifest' in repo.requirements: # safe but unnecessary
176 # otherwise
177 for unencoded, encoded, size in repo.store.datafiles():
178 if (unencoded.startswith('meta/') and
179 unencoded.endswith('00manifest.i')):
180 dir = unencoded[5:-12]
181 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
182 for fn in files:
183 repo.file(fn).strip(striprev, tr)
184 tr.endgroup()
185
186 for i in xrange(offset, len(tr.entries)):
187 file, troffset, ignore = tr.entries[i]
188 with repo.svfs(file, 'a', checkambig=True) as fp:
189 fp.truncate(troffset)
190 if troffset == 0:
191 repo.store.markremoved(file)
192
193 if tmpbundlefile:
194 ui.note(_("adding branch\n"))
195 f = vfs.open(tmpbundlefile, "rb")
196 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
197 if not repo.ui.verbose:
198 # silence internal shuffling chatter
199 repo.ui.pushbuffer()
200 if isinstance(gen, bundle2.unbundle20):
201 with repo.transaction('strip') as tr:
202 tr.hookargs = {'source': 'strip',
203 'url': 'bundle:' + vfs.join(tmpbundlefile)}
204 bundle2.applybundle(repo, gen, tr, source='strip',
205 url='bundle:' + vfs.join(tmpbundlefile))
206 else:
207 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
208 True)
209 if not repo.ui.verbose:
210 repo.ui.popbuffer()
211 f.close()
212 repo._phasecache.invalidate()
213
214 for m in updatebm:
215 bm[m] = repo[newbmtarget].node()
216
217 with repo.lock():
218 with repo.transaction('repair') as tr:
219 bm.recordchange(tr)
220
221 # remove undo files
222 for undovfs, undofile in repo.undofiles():
223 try:
224 undovfs.unlink(undofile)
225 except OSError as e:
226 if e.errno != errno.ENOENT:
227 ui.warn(_('error removing %s: %s\n') %
228 (undovfs.join(undofile), str(e)))
229
230 except: # re-raises
231 if backupfile:
232 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 % vfs.join(backupfile))
234 if tmpbundlefile:
235 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 % vfs.join(tmpbundlefile))
237 ui.warn(_("(fix the problem, then recover the changesets with "
238 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 raise
240 else:
241 if tmpbundlefile:
242 # Remove temporary bundle only if there were no exceptions
243 vfs.unlink(tmpbundlefile)
244
245 repo.destroyed()
246 # return the backup file path (or None if 'backup' was False) so
247 # extensions can use it
248 return backupfile
249
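
   Note: the head/base vocabulary in the long comment inside ``strip``
   (head = revision in the set with no descendant in the set; base =
   revision in the set with no ancestor in the set) can be illustrated on a
   toy DAG:

       # parent map for a toy history; -1 means "no parent"
       parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}
       revs = {1, 2, 3}

       bases = {r for r in revs if not any(p in revs for p in parents[r])}
       heads = revs - {p for r in revs for p in parents[r]}
       print(bases)   # set([1])    -> no ancestor inside the set
       print(heads)   # set([2, 3]) -> no descendant inside the set
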
250 def rebuildfncache(ui, repo):
251 """Rebuilds the fncache file from repo history.
252
253 Missing entries will be added. Extra entries will be removed.
254 """
255 repo = repo.unfiltered()
256
257 if 'fncache' not in repo.requirements:
258 ui.warn(_('(not rebuilding fncache because repository does not '
259 'support fncache)\n'))
260 return
261
262 with repo.lock():
263 fnc = repo.store.fncache
264 # Trigger load of fncache.
265 if 'irrelevant' in fnc:
266 pass
267
268 oldentries = set(fnc.entries)
269 newentries = set()
270 seenfiles = set()
271
272 repolen = len(repo)
273 for rev in repo:
274 ui.progress(_('rebuilding'), rev, total=repolen,
275 unit=_('changesets'))
276
277 ctx = repo[rev]
278 for f in ctx.files():
279 # This is to minimize I/O.
280 if f in seenfiles:
281 continue
282 seenfiles.add(f)
283
284 i = 'data/%s.i' % f
285 d = 'data/%s.d' % f
286
287 if repo.store._exists(i):
288 newentries.add(i)
289 if repo.store._exists(d):
290 newentries.add(d)
291
292 ui.progress(_('rebuilding'), None)
293
294 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 for dir in util.dirs(seenfiles):
296 i = 'meta/%s/00manifest.i' % dir
297 d = 'meta/%s/00manifest.d' % dir
298
299 if repo.store._exists(i):
300 newentries.add(i)
301 if repo.store._exists(d):
302 newentries.add(d)
303
304 addcount = len(newentries - oldentries)
305 removecount = len(oldentries - newentries)
306 for p in sorted(oldentries - newentries):
307 ui.write(_('removing %s\n') % p)
308 for p in sorted(newentries - oldentries):
309 ui.write(_('adding %s\n') % p)
310
311 if addcount or removecount:
312 ui.write(_('%d items added, %d removed from fncache\n') %
313 (addcount, removecount))
314 fnc.entries = newentries
315 fnc._dirty = True
316
317 with repo.transaction('fncache') as tr:
318 fnc.write(tr)
319 else:
320 ui.write(_('fncache already up to date\n'))
321
322 def stripbmrevset(repo, mark):
323 """
324 The revset to strip when strip is called with -B mark
325
326 Needs to live here so extensions can use it and wrap it even when strip is
327 not enabled or not present on a box.
328 """
329 return repo.revs("ancestors(bookmark(%s)) - "
330 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(bookmark() and not bookmark(%s))",
332 mark, mark, mark)
333
334 def deleteobsmarkers(obsstore, indices):
335 """Delete some obsmarkers from obsstore and return how many were deleted
336
337 'indices' is a list of ints which are the indices
338 of the markers to be deleted.
339
340 Every invocation of this function completely rewrites the obsstore file,
341 skipping the markers we want removed. A new temporary file is created,
342 the remaining markers are written there, and on .close() that file is
343 atomically renamed to obsstore, thus guaranteeing consistency."""
344 if not indices:
345 # we don't want to rewrite the obsstore with the same content
346 return
347
348 left = []
349 current = obsstore._all
350 n = 0
351 for i, m in enumerate(current):
352 if i in indices:
353 n += 1
354 continue
355 left.append(m)
356
357 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 newobsstorefile.write(bytes)
360 newobsstorefile.close()
361 return n
362
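
   Note: the core of ``deleteobsmarkers`` is an index-based filter followed
   by an atomic rewrite. The filtering itself, on plain data:

       current = ['m0', 'm1', 'm2', 'm3']
       indices = set([1, 3])
       left = [m for i, m in enumerate(current) if i not in indices]
       print(left)                      # ['m0', 'm2']
       print(len(current) - len(left))  # 2 markers deleted
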
363 25 def upgraderequiredsourcerequirements(repo):
364 26 """Obtain requirements required to be present to upgrade a repo.
365 27