new file 100644 (no content)
@@ -34,6 +34,8 @@ from mercurial import (
     wireprotov1server,
 )
 
+from mercurial.upgrade_utils import engine as upgrade_engine
+
 from mercurial.interfaces import repository
 
 from mercurial.utils import (
@@ -520,7 +522,7 @@ def uploadblobs(repo, pointers):
     remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
 
 
-@eh.wrapfunction(upgrade, b'_finishdatamigration')
+@eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
     orig(ui, srcrepo, dstrepo, requirements)
 
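As a point of reference for extension authors, a rough sketch of hooking the relocated function follows; only the upgrade_engine import path and the _finishdatamigration hook name come from the hunks above, while the exthelper boilerplate is an assumption added for illustration:

    # Hypothetical extension sketch, not part of this change: wrap the
    # _finishdatamigration hook at its new home in upgrade_utils.engine.
    from mercurial import exthelper
    from mercurial.upgrade_utils import engine as upgrade_engine

    eh = exthelper.exthelper()

    @eh.wrapfunction(upgrade_engine, b'_finishdatamigration')
    def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
        # Run the core hook point first, then do any extension-specific
        # copying between the srcrepo and dstrepo stores.
        orig(ui, srcrepo, dstrepo, requirements)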
(502 lines changed)
@@ -7,24 +7,18 @@
 
 from __future__ import absolute_import
 
-import stat
-
 from .i18n import _
-from .pycompat import getattr
 from . import (
-    changelog,
     error,
-    filelog,
     hg,
     localrepo,
-    manifest,
-    metadata,
     pycompat,
     requirements,
-    revlog,
-    scmutil,
     util,
-    vfs as vfsmod,
+)
+
+from .upgrade_utils import (
+    engine as upgrade_engine,
 )
 
 from .utils import compression
@@ -692,480 +686,6 @@ def determineactions(repo, deficiencies,
     return newactions
 
 
 [474 removed lines, not reproduced here: _revlogfrompath, _copyrevlog, the
  UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS and
  UPGRADE_ALL_REVLOGS constants, getsidedatacompanion, matchrevlog,
  _clonerevlogs, _filterstorefile, _finishdatamigration and _upgraderepo;
  they reappear in mercurial/upgrade_utils/engine.py, where _upgraderepo
  becomes upgrade()]
 def upgraderepo(
     ui,
     repo,
@@ -1182,11 +702,11 @@ def upgraderepo(
     optimize = {legacy_opts_map.get(o, o) for o in optimize}
     repo = repo.unfiltered()
 
-    revlogs = set(UPGRADE_ALL_REVLOGS)
+    revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS)
     specentries = (
-        (UPGRADE_CHANGELOG, changelog),
-        (UPGRADE_MANIFEST, manifest),
-        (UPGRADE_FILELOGS, filelogs),
+        (upgrade_engine.UPGRADE_CHANGELOG, changelog),
+        (upgrade_engine.UPGRADE_MANIFEST, manifest),
+        (upgrade_engine.UPGRADE_FILELOGS, filelogs),
     )
     specified = [(y, x) for (y, x) in specentries if x is not None]
     if specified:
@@ -1287,7 +807,7 @@ def upgraderepo(
     removedreqs = repo.requirements - newreqs
     addedreqs = newreqs - repo.requirements
 
-    if revlogs != UPGRADE_ALL_REVLOGS:
+    if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS:
         incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
         if incompatible:
             msg = _(
@@ -1295,7 +815,7 @@ def upgraderepo(
                 b'change: %s\n'
             )
             ui.warn(msg % b', '.join(sorted(incompatible)))
-            revlogs = UPGRADE_ALL_REVLOGS
+            revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS
 
     def write_labeled(l, label):
         first = True
@@ -1447,7 +967,7 @@ def upgraderepo(
         dstrepo = hg.repository(repoui, path=tmppath, create=True)
 
         with dstrepo.wlock(), dstrepo.lock():
-            backuppath = _upgraderepo(
+            backuppath = upgrade_engine.upgrade(
                 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
             )
         if not (backup or backuppath is None):
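To make the relocated API concrete, here is a minimal, hypothetical usage sketch; only the UPGRADE_* names and the upgrade() entry point are taken from the hunks above, and the surrounding variables are assumed to exist as in upgraderepo():

    # Hypothetical sketch: select which revlogs to reclone through the new
    # engine module, mirroring the call site in upgraderepo() above.
    from mercurial.upgrade_utils import engine as upgrade_engine

    revlogs = {upgrade_engine.UPGRADE_CHANGELOG, upgrade_engine.UPGRADE_MANIFEST}
    # With ui, repo, dstrepo, newreqs and upgradeactions in hand, the engine
    # is then driven exactly as the call site above does:
    # backuppath = upgrade_engine.upgrade(
    #     ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
    # )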
(1000 lines changed)
@@ -9,14 +9,12 @@ from __future__ import absolute_import
 
 import stat
 
-from .i18n import _
-from .pycompat import getattr
-from . import (
+from ..i18n import _
+from ..pycompat import getattr
+from .. import (
     changelog,
     error,
     filelog,
-    hg,
-    localrepo,
     manifest,
     metadata,
     pycompat,
@@ -27,670 +25,6 @@ from . import (
     vfs as vfsmod,
 )
 
 [664 removed lines, not reproduced here: everything that stays behind in
  mercurial/upgrade.py rather than moving into the engine module, namely the
  compression import, RECLONES_REQUIREMENTS, the requirement helpers
  (requiredsourcerequirements, blocksourcerequirements,
  supportremovedrequirements, supporteddestrequirements,
  allowednewrequirements, preservedrequirements), the improvement and
  formatvariant classes with their registered variants, finddeficiencies,
  legacy_opts_map, the registered re-delta optimizations, findoptimizations
  and determineactions]
 
 def _revlogfrompath(repo, path):
     """Obtain a revlog from a repo path.
@@ -1049,7 +383,7 @@ def _finishdatamigration(ui, srcrepo, ds
     """
 
 
-def _upgraderepo(
+def upgrade(
     ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
 ):
     """Do the low-level work of upgrading a repository.
@@ -1164,329 +498,3 @@ def _upgraderepo(
     backupvfs.unlink(b'store/lock')
 
     return backuppath
 [326 removed lines, not reproduced here: the user-facing upgraderepo()
  driver, which stays in mercurial/upgrade.py rather than moving into the
  engine module]
|
|||
1396 | ) |
|
|||
1397 | ) |
|
|||
1398 |
|
||||
1399 | printrequirements() |
|
|||
1400 | printoptimisations() |
|
|||
1401 | printupgradeactions() |
|
|||
1402 | print_affected_revlogs() |
|
|||
1403 |
|
||||
1404 | unusedoptimize = [i for i in alloptimizations if i not in actions] |
|
|||
1405 |
|
||||
1406 | if unusedoptimize: |
|
|||
1407 | ui.status( |
|
|||
1408 | _( |
|
|||
1409 | b'additional optimizations are available by specifying ' |
|
|||
1410 | b'"--optimize <name>":\n\n' |
|
|||
1411 | ) |
|
|||
1412 | ) |
|
|||
1413 | for i in unusedoptimize: |
|
|||
1414 | ui.status(_(b'%s\n %s\n\n') % (i.name, i.description)) |
|
|||
1415 | return |
|
|||
1416 |
|
||||
1417 | # Else we're in the run=true case. |
|
|||
1418 | ui.write(_(b'upgrade will perform the following actions:\n\n')) |
|
|||
1419 | printrequirements() |
|
|||
1420 | printoptimisations() |
|
|||
1421 | printupgradeactions() |
|
|||
1422 | print_affected_revlogs() |
|
|||
1423 |
|
||||
1424 | upgradeactions = [a.name for a in actions] |
|
|||
1425 |
|
||||
1426 | ui.status(_(b'beginning upgrade...\n')) |
|
|||
1427 | with repo.wlock(), repo.lock(): |
|
|||
1428 | ui.status(_(b'repository locked and read-only\n')) |
|
|||
1429 | # Our strategy for upgrading the repository is to create a new, |
|
|||
1430 | # temporary repository, write data to it, then do a swap of the |
|
|||
1431 | # data. There are less heavyweight ways to do this, but it is easier |
|
|||
1432 | # to create a new repo object than to instantiate all the components |
|
|||
1433 | # (like the store) separately. |
|
|||
1434 | tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path) |
|
|||
1435 | backuppath = None |
|
|||
1436 | try: |
|
|||
1437 | ui.status( |
|
|||
1438 | _( |
|
|||
1439 | b'creating temporary repository to stage migrated ' |
|
|||
1440 | b'data: %s\n' |
|
|||
1441 | ) |
|
|||
1442 | % tmppath |
|
|||
1443 | ) |
|
|||
1444 |
|
||||
1445 | # clone ui without using ui.copy because repo.ui is protected |
|
|||
1446 | repoui = repo.ui.__class__(repo.ui) |
|
|||
1447 | dstrepo = hg.repository(repoui, path=tmppath, create=True) |
|
|||
1448 |
|
||||
1449 | with dstrepo.wlock(), dstrepo.lock(): |
|
|||
1450 | backuppath = _upgraderepo( |
|
|||
1451 | ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs |
|
|||
1452 | ) |
|
|||
1453 | if not (backup or backuppath is None): |
|
|||
1454 | ui.status( |
|
|||
1455 | _(b'removing old repository content%s\n') % backuppath |
|
|||
1456 | ) |
|
|||
1457 | repo.vfs.rmtree(backuppath, forcibly=True) |
|
|||
1458 | backuppath = None |
|
|||
1459 |
|
||||
1460 | finally: |
|
|||
1461 | ui.status(_(b'removing temporary repository %s\n') % tmppath) |
|
|||
1462 | repo.vfs.rmtree(tmppath, forcibly=True) |
|
|||
1463 |
|
||||
1464 | if backuppath and not ui.quiet: |
|
|||
1465 | ui.warn( |
|
|||
1466 | _(b'copy of old repository backed up at %s\n') % backuppath |
|
|||
1467 | ) |
|
|||
1468 | ui.warn( |
|
|||
1469 | _( |
|
|||
1470 | b'the old repository will not be deleted; remove ' |
|
|||
1471 | b'it to free up disk space once the upgraded ' |
|
|||
1472 | b'repository is verified\n' |
|
|||
1473 | ) |
|
|||
1474 | ) |
|
|||
1475 |
|
||||
1476 | if sharedsafe.name in addedreqs: |
|
|||
1477 | ui.warn( |
|
|||
1478 | _( |
|
|||
1479 | b'repository upgraded to share safe mode, existing' |
|
|||
1480 | b' shares will still work in old non-safe mode. ' |
|
|||
1481 | b'Re-share existing shares to use them in safe mode' |
|
|||
1482 | b' New shares will be created in safe mode.\n' |
|
|||
1483 | ) |
|
|||
1484 | ) |
|
|||
1485 | if sharedsafe.name in removedreqs: |
|
|||
1486 | ui.warn( |
|
|||
1487 | _( |
|
|||
1488 | b'repository downgraded to not use share safe mode, ' |
|
|||
1489 | b'existing shares will not work and needs to' |
|
|||
1490 | b' be reshared.\n' |
|
|||
1491 | ) |
|
|||
1492 | ) |
|
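The block above is all pre-flight validation: the current and target requirement sets are compared with plain set arithmetic, and the upgrade aborts if anything would have to be added or removed that the code does not know how to handle. A minimal standalone sketch of that arithmetic follows; the requirement names and allow-lists are invented for illustration and stand in for the real `supportremovedrequirements()` and `allowednewrequirements()` results.

# Illustration only -- requirement names and allow-lists below are made up.
current_reqs = {b'revlogv1', b'store', b'fncache', b'dotencode'}
new_reqs = {b'revlogv1', b'store', b'fncache', b'dotencode', b'sparserevlog'}

supported_removals = set()               # stand-in for supportremovedrequirements()
supported_additions = {b'sparserevlog'}  # stand-in for allowednewrequirements()

noremovereqs = current_reqs - new_reqs - supported_removals
noaddreqs = new_reqs - current_reqs - supported_additions

# upgraderepo() aborts if either set is non-empty; here both are empty.
assert not noremovereqs and not noaddreqs
print(sorted(new_reqs - current_reqs))   # requirements that would be added
print(sorted(current_reqs - new_reqs))   # requirements that would be removed

In practice this code path is reached through `hg debugupgraderepo`, which only reports the planned changes by default and rewrites the store when `--run` is given.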
@@ -1287,6 +1287,7 b' packages = ['
1287 |     'mercurial.thirdparty.attr',           | 1287 |     'mercurial.thirdparty.attr',
1288 |     'mercurial.thirdparty.zope',           | 1288 |     'mercurial.thirdparty.zope',
1289 |     'mercurial.thirdparty.zope.interface', | 1289 |     'mercurial.thirdparty.zope.interface',
     |                                            | 1290 |     'mercurial.upgrade_utils',
1290 |     'mercurial.utils',                     | 1291 |     'mercurial.utils',
1291 |     'mercurial.revlogutils',               | 1292 |     'mercurial.revlogutils',
1292 |     'mercurial.testing',                   | 1293 |     'mercurial.testing',
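This hunk registers the new `mercurial.upgrade_utils` package in the packaging manifest (presumably setup.py's `packages` list) so installed builds ship the relocated engine module. A quick sanity check that the new module path resolves in a build of this change; the attribute probed here is the one the extension hunk below wraps.

# Assumes a Mercurial build containing this change is importable.
import importlib

engine = importlib.import_module('mercurial.upgrade_utils.engine')
print(hasattr(engine, 'getsidedatacompanion'))  # expected: True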
@@ -15,9 +15,10 b' from mercurial import ('
15 |     node,                                     | 15 |     node,
16 |     requirements,                             | 16 |     requirements,
17 |     revlog,                                   | 17 |     revlog,
18 |     upgrade,                                  |    |
19 | )                                             | 18 | )
20 |                                               | 19 |
   |                                               | 20 | from mercurial.upgrade_utils import engine as upgrade_engine
   |                                               | 21 |
21 | from mercurial.revlogutils import sidedata    | 22 | from mercurial.revlogutils import sidedata
22 |                                               | 23 |
23 |                                               | 24 |
@@ -79,5 +80,5 b' def extsetup(ui):'
79 | extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision) | 80 | extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
80 | extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)       | 81 | extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
81 | extensions.wrapfunction(                                               | 82 | extensions.wrapfunction(
82 |     upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion          | 83 |     upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
83 | )                                                                      | 84 | )
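The extension touched by the last hunk now wraps the side-data companion hook on the relocated module instead of on `upgrade`. A self-contained sketch of the same pattern for a third-party extension; the wrapper body is illustrative, and the `(srcrepo, dstrepo)` signature is assumed to carry over unchanged from the pre-move upgrade.py.

# Minimal extension sketch; assumes the post-move layout from this change.
from mercurial import extensions
from mercurial.upgrade_utils import engine as upgrade_engine


def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
    # Call the original hook, then adjust or replace its result as needed.
    sidedatacompanion = orig(srcrepo, dstrepo)
    return sidedatacompanion


def extsetup(ui):
    extensions.wrapfunction(
        upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
    )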