##// END OF EJS Templates
upgrade: use rawsize() instead of revlog index...
Gregory Szorc -
r39895:32d3ed30 default
parent child Browse files
Show More
@@ -1,262 +1,262 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 )
17 )
18
18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """File storage for a single tracked file, backed by a revlog.

    This is a thin adapter implementing ``repository.ifilestorage``;
    nearly every method delegates directly to the underlying
    ``revlog.revlog`` instance held in ``self._revlog``.
    """
    def __init__(self, opener, path):
        # Revlog data for file ``path`` lives under the store's ``data/``
        # directory; censorable=True tolerates censored revisions.
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path
        # Used by repo upgrade.
        self.index = self._revlog.index
        # Used by changegroup generation.
        self._generaldelta = self._revlog._generaldelta

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    # Used by verify.
    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Used by repo upgrade, verify.
    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted but not forwarded to the
        # underlying revlog -- confirm this is intentional.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        # Strip the '\1\n'-delimited copy-metadata header, if present,
        # and return only the file data that follows it.
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # Wrap text in a metadata header when there is metadata, or when
        # the raw text itself could be mistaken for a header.
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        # Copy metadata is only stored on revisions whose first parent
        # is null; anything else cannot be a rename.
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        # Prefix with an empty metadata header so the comparison matches
        # how such text would have been stored (see add()).
        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by repo upgrade.
    @property
    def opener(self):
        return self._revlog.opener

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        copydata = super(narrowfilelog, self).renamed(node)
        if not copydata:
            return copydata

        # Renames whose source lies outside the narrowspec are a problem:
        # the base text of the rename may be missing locally, and code
        # walking the ancestry or computing a diff could then encounter a
        # missing revision. We work around that by quietly dropping the
        # rename metadata when the copy source is not part of the narrow
        # spec.
        #
        # A better solution would be to see if the base revision is
        # actually available rather than assuming it isn't. Better still
        # would be to teach all consumers of rename metadata that the
        # base revision may be absent.
        #
        # TODO consider better ways of doing this.
        if self._narrowmatch(copydata[0]):
            return copydata
        return None

    def size(self, rev):
        # Our renamed() may lie, so consult the base implementation for
        # the accurate rename state before deciding how to size this rev.
        node = self.node(rev)
        if not super(narrowfilelog, self).renamed(node):
            return super(narrowfilelog, self).size(rev)
        return len(self.read(node))

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)
        if not different:
            return different

        # Because renamed() may lie, a "different" verdict can be a false
        # positive. Double-check against the original renamed()
        # implementation by comparing the actual contents.
        if super(narrowfilelog, self).renamed(node):
            return self.read(node) != text
        return different
@@ -1,901 +1,898 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 changelog,
14 changelog,
15 error,
15 error,
16 filelog,
16 filelog,
17 hg,
17 hg,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 vfs as vfsmod,
24 vfs as vfsmod,
25 )
25 )
26
26
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # Both requirements were introduced in Mercurial 0.9.2 and are
    # expected in any repository modern enough to be upgraded.
    return {'revlogv1', 'store'}
39
39
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    blockers = {
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by
        # default. It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    }
    return blockers
56
56
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # Sparse revlog support is currently the only requirement the
    # upgrade machinery knows how to drop.
    return {localrepo.SPARSEREVLOG_REQUIREMENT}
67
67
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    supported = {
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    }
    supported.add(localrepo.SPARSEREVLOG_REQUIREMENT)
    return supported
84
84
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    allowed = {'dotencode', 'fncache', 'generaldelta'}
    allowed.add(localrepo.SPARSEREVLOG_REQUIREMENT)
    return allowed
101
101
def preservedrequirements(repo):
    """Obtain requirements that must be preserved as-is during an upgrade.

    Currently no requirement is unconditionally carried over.
    """
    return set()
104
104
# Improvement categories (see ``improvement.type``): a 'deficiency' is a
# problem with the current repository; an 'optimization' is an optional
# action that can improve it.
deficiency = 'deficiency'
optimisation = 'optimization'
107
107
class improvement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.
    """
    def __init__(self, name, type, description, upgrademessage):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

    def __eq__(self, other):
        # Identity is determined solely by ``name``.
        if isinstance(other, improvement):
            return self.name == other.name
        # Let Python try the reflected comparison instead.
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.name)
149
149
# Registry of all known format variant classes, in registration order.
allformatvariant = []

def registerformatvariant(cls):
    """Class decorator recording ``cls`` in ``allformatvariant``."""
    allformatvariant.append(cls)
    return cls
155
155
class formatvariant(improvement):
    """an improvement subclass dedicated to repository format"""
    # All format variants are reported as deficiencies when not enabled.
    type = deficiency
    ### The following attributes should be defined for each class:

    # machine-readable string uniquely identifying this improvement. it will be
    # mapped to an action later in the upgrade process.
    name = None

    # message intended for humans explaining the improvement in more detail,
    # including the implications of it ``deficiency`` types, should be worded
    # in the present tense.
    description = None

    # message intended for humans explaining what an upgrade addressing this
    # issue will do. should be worded in the future tense.
    upgrademessage = None

    # value of current Mercurial default for new repository
    default = None

    def __init__(self):
        # Subclasses are used as class-level singletons and are never
        # instantiated.
        raise NotImplementedError()

    @staticmethod
    def fromrepo(repo):
        """current value of the variant in the repository"""
        raise NotImplementedError()

    @staticmethod
    def fromconfig(repo):
        """current value of the variant in the configuration"""
        raise NotImplementedError()
189
189
class requirementformatvariant(formatvariant):
    """formatvariant based on a 'requirement' name.

    Many format variant are controlled by a 'requirement'. We define a small
    subclass to factor the code.
    """

    # the requirement that control this format variant
    _requirement = None

    @staticmethod
    def _newreporequirements(ui):
        # Thin wrapper around localrepo.newreporequirements; presumably
        # kept as an override hook for tests/extensions -- TODO confirm.
        return localrepo.newreporequirements(ui)

    @classmethod
    def fromrepo(cls, repo):
        # True when the controlling requirement is active in the repo.
        assert cls._requirement is not None
        return cls._requirement in repo.requirements

    @classmethod
    def fromconfig(cls, repo):
        # True when a freshly created repo would enable the requirement.
        assert cls._requirement is not None
        return cls._requirement in cls._newreporequirements(repo.ui)
213
213
@registerformatvariant
class fncache(requirementformatvariant):
    # Variant tracking the 'fncache' store requirement.
    name = 'fncache'

    _requirement = 'fncache'

    # Enabled by default in new repositories.
    default = True

    description = _('long and reserved filenames may not work correctly; '
                    'repository performance is sub-optimal')

    upgrademessage = _('repository will be more resilient to storing '
                       'certain paths and performance of certain '
                       'operations should be improved')
228
228
@registerformatvariant
class dotencode(requirementformatvariant):
    # Variant tracking the 'dotencode' store requirement.
    name = 'dotencode'

    _requirement = 'dotencode'

    # Enabled by default in new repositories.
    default = True

    description = _('storage of filenames beginning with a period or '
                    'space may not work correctly')

    upgrademessage = _('repository will be better able to store files '
                       'beginning with a space or period')
242
242
@registerformatvariant
class generaldelta(requirementformatvariant):
    # Variant tracking the 'generaldelta' revlog requirement.
    name = 'generaldelta'

    _requirement = 'generaldelta'

    # Enabled by default in new repositories.
    default = True

    description = _('deltas within internal storage are unable to '
                    'choose optimal revisions; repository is larger and '
                    'slower than it could be; interaction with other '
                    'repositories may require extra network and CPU '
                    'resources, making "hg push" and "hg pull" slower')

    upgrademessage = _('repository storage will be able to create '
                       'optimal deltas; new repository data will be '
                       'smaller and read times should decrease; '
                       'interacting with other repositories using this '
                       'storage model should require less network and '
                       'CPU resources, making "hg push" and "hg pull" '
                       'faster')
264
264
@registerformatvariant
class sparserevlog(requirementformatvariant):
    # Variant tracking the sparse-revlog requirement (still experimental:
    # not yet enabled by default).
    name = 'sparserevlog'

    _requirement = localrepo.SPARSEREVLOG_REQUIREMENT

    default = False

    description = _('in order to limit disk reading and memory usage on older '
                    'version, the span of a delta chain from its root to its '
                    'end is limited, whatever the relevant data in this span. '
                    'This can severly limit Mercurial ability to build good '
                    'chain of delta resulting is much more storage space being '
                    'taken and limit reusability of on disk delta during '
                    'exchange.'
                    )

    upgrademessage = _('Revlog supports delta chain with more unused data '
                       'between payload. These gaps will be skipped at read '
                       'time. This allows for better delta chains, making a '
                       'better compression and faster exchange with server.')
286
286
287 @registerformatvariant
287 @registerformatvariant
288 class removecldeltachain(formatvariant):
288 class removecldeltachain(formatvariant):
289 name = 'plain-cl-delta'
289 name = 'plain-cl-delta'
290
290
291 default = True
291 default = True
292
292
293 description = _('changelog storage is using deltas instead of '
293 description = _('changelog storage is using deltas instead of '
294 'raw entries; changelog reading and any '
294 'raw entries; changelog reading and any '
295 'operation relying on changelog data are slower '
295 'operation relying on changelog data are slower '
296 'than they could be')
296 'than they could be')
297
297
298 upgrademessage = _('changelog storage will be reformated to '
298 upgrademessage = _('changelog storage will be reformated to '
299 'store raw entries; changelog reading will be '
299 'store raw entries; changelog reading will be '
300 'faster; changelog size may be reduced')
300 'faster; changelog size may be reduced')
301
301
302 @staticmethod
302 @staticmethod
303 def fromrepo(repo):
303 def fromrepo(repo):
304 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
304 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
305 # changelogs with deltas.
305 # changelogs with deltas.
306 cl = repo.changelog
306 cl = repo.changelog
307 chainbase = cl.chainbase
307 chainbase = cl.chainbase
308 return all(rev == chainbase(rev) for rev in cl)
308 return all(rev == chainbase(rev) for rev in cl)
309
309
310 @staticmethod
310 @staticmethod
311 def fromconfig(repo):
311 def fromconfig(repo):
312 return True
312 return True
313
313
314 @registerformatvariant
314 @registerformatvariant
315 class compressionengine(formatvariant):
315 class compressionengine(formatvariant):
316 name = 'compression'
316 name = 'compression'
317 default = 'zlib'
317 default = 'zlib'
318
318
319 description = _('Compresion algorithm used to compress data. '
319 description = _('Compresion algorithm used to compress data. '
320 'Some engine are faster than other')
320 'Some engine are faster than other')
321
321
322 upgrademessage = _('revlog content will be recompressed with the new '
322 upgrademessage = _('revlog content will be recompressed with the new '
323 'algorithm.')
323 'algorithm.')
324
324
325 @classmethod
325 @classmethod
326 def fromrepo(cls, repo):
326 def fromrepo(cls, repo):
327 for req in repo.requirements:
327 for req in repo.requirements:
328 if req.startswith('exp-compression-'):
328 if req.startswith('exp-compression-'):
329 return req.split('-', 2)[2]
329 return req.split('-', 2)[2]
330 return 'zlib'
330 return 'zlib'
331
331
332 @classmethod
332 @classmethod
333 def fromconfig(cls, repo):
333 def fromconfig(cls, repo):
334 return repo.ui.config('experimental', 'format.compression')
334 return repo.ui.config('experimental', 'format.compression')
335
335
336 def finddeficiencies(repo):
336 def finddeficiencies(repo):
337 """returns a list of deficiencies that the repo suffer from"""
337 """returns a list of deficiencies that the repo suffer from"""
338 deficiencies = []
338 deficiencies = []
339
339
340 # We could detect lack of revlogv1 and store here, but they were added
340 # We could detect lack of revlogv1 and store here, but they were added
341 # in 0.9.2 and we don't support upgrading repos without these
341 # in 0.9.2 and we don't support upgrading repos without these
342 # requirements, so let's not bother.
342 # requirements, so let's not bother.
343
343
344 for fv in allformatvariant:
344 for fv in allformatvariant:
345 if not fv.fromrepo(repo):
345 if not fv.fromrepo(repo):
346 deficiencies.append(fv)
346 deficiencies.append(fv)
347
347
348 return deficiencies
348 return deficiencies
349
349
350 def findoptimizations(repo):
350 def findoptimizations(repo):
351 """Determine optimisation that could be used during upgrade"""
351 """Determine optimisation that could be used during upgrade"""
352 # These are unconditionally added. There is logic later that figures out
352 # These are unconditionally added. There is logic later that figures out
353 # which ones to apply.
353 # which ones to apply.
354 optimizations = []
354 optimizations = []
355
355
356 optimizations.append(improvement(
356 optimizations.append(improvement(
357 name='redeltaparent',
357 name='redeltaparent',
358 type=optimisation,
358 type=optimisation,
359 description=_('deltas within internal storage will be recalculated to '
359 description=_('deltas within internal storage will be recalculated to '
360 'choose an optimal base revision where this was not '
360 'choose an optimal base revision where this was not '
361 'already done; the size of the repository may shrink and '
361 'already done; the size of the repository may shrink and '
362 'various operations may become faster; the first time '
362 'various operations may become faster; the first time '
363 'this optimization is performed could slow down upgrade '
363 'this optimization is performed could slow down upgrade '
364 'execution considerably; subsequent invocations should '
364 'execution considerably; subsequent invocations should '
365 'not run noticeably slower'),
365 'not run noticeably slower'),
366 upgrademessage=_('deltas within internal storage will choose a new '
366 upgrademessage=_('deltas within internal storage will choose a new '
367 'base revision if needed')))
367 'base revision if needed')))
368
368
369 optimizations.append(improvement(
369 optimizations.append(improvement(
370 name='redeltamultibase',
370 name='redeltamultibase',
371 type=optimisation,
371 type=optimisation,
372 description=_('deltas within internal storage will be recalculated '
372 description=_('deltas within internal storage will be recalculated '
373 'against multiple base revision and the smallest '
373 'against multiple base revision and the smallest '
374 'difference will be used; the size of the repository may '
374 'difference will be used; the size of the repository may '
375 'shrink significantly when there are many merges; this '
375 'shrink significantly when there are many merges; this '
376 'optimization will slow down execution in proportion to '
376 'optimization will slow down execution in proportion to '
377 'the number of merges in the repository and the amount '
377 'the number of merges in the repository and the amount '
378 'of files in the repository; this slow down should not '
378 'of files in the repository; this slow down should not '
379 'be significant unless there are tens of thousands of '
379 'be significant unless there are tens of thousands of '
380 'files and thousands of merges'),
380 'files and thousands of merges'),
381 upgrademessage=_('deltas within internal storage will choose an '
381 upgrademessage=_('deltas within internal storage will choose an '
382 'optimal delta by computing deltas against multiple '
382 'optimal delta by computing deltas against multiple '
383 'parents; may slow down execution time '
383 'parents; may slow down execution time '
384 'significantly')))
384 'significantly')))
385
385
386 optimizations.append(improvement(
386 optimizations.append(improvement(
387 name='redeltaall',
387 name='redeltaall',
388 type=optimisation,
388 type=optimisation,
389 description=_('deltas within internal storage will always be '
389 description=_('deltas within internal storage will always be '
390 'recalculated without reusing prior deltas; this will '
390 'recalculated without reusing prior deltas; this will '
391 'likely make execution run several times slower; this '
391 'likely make execution run several times slower; this '
392 'optimization is typically not needed'),
392 'optimization is typically not needed'),
393 upgrademessage=_('deltas within internal storage will be fully '
393 upgrademessage=_('deltas within internal storage will be fully '
394 'recomputed; this will likely drastically slow down '
394 'recomputed; this will likely drastically slow down '
395 'execution time')))
395 'execution time')))
396
396
397 optimizations.append(improvement(
397 optimizations.append(improvement(
398 name='redeltafulladd',
398 name='redeltafulladd',
399 type=optimisation,
399 type=optimisation,
400 description=_('every revision will be re-added as if it was new '
400 description=_('every revision will be re-added as if it was new '
401 'content. It will go through the full storage '
401 'content. It will go through the full storage '
402 'mechanism giving extensions a chance to process it '
402 'mechanism giving extensions a chance to process it '
403 '(eg. lfs). This is similar to "redeltaall" but even '
403 '(eg. lfs). This is similar to "redeltaall" but even '
404 'slower since more logic is involved.'),
404 'slower since more logic is involved.'),
405 upgrademessage=_('each revision will be added as new content to the '
405 upgrademessage=_('each revision will be added as new content to the '
406 'internal storage; this will likely drastically slow '
406 'internal storage; this will likely drastically slow '
407 'down execution time, but some extensions might need '
407 'down execution time, but some extensions might need '
408 'it')))
408 'it')))
409
409
410 return optimizations
410 return optimizations
411
411
412 def determineactions(repo, deficiencies, sourcereqs, destreqs):
412 def determineactions(repo, deficiencies, sourcereqs, destreqs):
413 """Determine upgrade actions that will be performed.
413 """Determine upgrade actions that will be performed.
414
414
415 Given a list of improvements as returned by ``finddeficiencies`` and
415 Given a list of improvements as returned by ``finddeficiencies`` and
416 ``findoptimizations``, determine the list of upgrade actions that
416 ``findoptimizations``, determine the list of upgrade actions that
417 will be performed.
417 will be performed.
418
418
419 The role of this function is to filter improvements if needed, apply
419 The role of this function is to filter improvements if needed, apply
420 recommended optimizations from the improvements list that make sense,
420 recommended optimizations from the improvements list that make sense,
421 etc.
421 etc.
422
422
423 Returns a list of action names.
423 Returns a list of action names.
424 """
424 """
425 newactions = []
425 newactions = []
426
426
427 knownreqs = supporteddestrequirements(repo)
427 knownreqs = supporteddestrequirements(repo)
428
428
429 for d in deficiencies:
429 for d in deficiencies:
430 name = d.name
430 name = d.name
431
431
432 # If the action is a requirement that doesn't show up in the
432 # If the action is a requirement that doesn't show up in the
433 # destination requirements, prune the action.
433 # destination requirements, prune the action.
434 if name in knownreqs and name not in destreqs:
434 if name in knownreqs and name not in destreqs:
435 continue
435 continue
436
436
437 newactions.append(d)
437 newactions.append(d)
438
438
439 # FUTURE consider adding some optimizations here for certain transitions.
439 # FUTURE consider adding some optimizations here for certain transitions.
440 # e.g. adding generaldelta could schedule parent redeltas.
440 # e.g. adding generaldelta could schedule parent redeltas.
441
441
442 return newactions
442 return newactions
443
443
444 def _revlogfrompath(repo, path):
444 def _revlogfrompath(repo, path):
445 """Obtain a revlog from a repo path.
445 """Obtain a revlog from a repo path.
446
446
447 An instance of the appropriate class is returned.
447 An instance of the appropriate class is returned.
448 """
448 """
449 if path == '00changelog.i':
449 if path == '00changelog.i':
450 return changelog.changelog(repo.svfs)
450 return changelog.changelog(repo.svfs)
451 elif path.endswith('00manifest.i'):
451 elif path.endswith('00manifest.i'):
452 mandir = path[:-len('00manifest.i')]
452 mandir = path[:-len('00manifest.i')]
453 return manifest.manifestrevlog(repo.svfs, tree=mandir)
453 return manifest.manifestrevlog(repo.svfs, tree=mandir)
454 else:
454 else:
455 #reverse of "/".join(("data", path + ".i"))
455 #reverse of "/".join(("data", path + ".i"))
456 return filelog.filelog(repo.svfs, path[5:-2])
456 return filelog.filelog(repo.svfs, path[5:-2])
457
457
458 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, deltabothparents):
458 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, deltabothparents):
459 """Copy revlogs between 2 repos."""
459 """Copy revlogs between 2 repos."""
460 revcount = 0
460 revcount = 0
461 srcsize = 0
461 srcsize = 0
462 srcrawsize = 0
462 srcrawsize = 0
463 dstsize = 0
463 dstsize = 0
464 fcount = 0
464 fcount = 0
465 frevcount = 0
465 frevcount = 0
466 fsrcsize = 0
466 fsrcsize = 0
467 frawsize = 0
467 frawsize = 0
468 fdstsize = 0
468 fdstsize = 0
469 mcount = 0
469 mcount = 0
470 mrevcount = 0
470 mrevcount = 0
471 msrcsize = 0
471 msrcsize = 0
472 mrawsize = 0
472 mrawsize = 0
473 mdstsize = 0
473 mdstsize = 0
474 crevcount = 0
474 crevcount = 0
475 csrcsize = 0
475 csrcsize = 0
476 crawsize = 0
476 crawsize = 0
477 cdstsize = 0
477 cdstsize = 0
478
478
479 # Perform a pass to collect metadata. This validates we can open all
479 # Perform a pass to collect metadata. This validates we can open all
480 # source files and allows a unified progress bar to be displayed.
480 # source files and allows a unified progress bar to be displayed.
481 for unencoded, encoded, size in srcrepo.store.walk():
481 for unencoded, encoded, size in srcrepo.store.walk():
482 if unencoded.endswith('.d'):
482 if unencoded.endswith('.d'):
483 continue
483 continue
484
484
485 rl = _revlogfrompath(srcrepo, unencoded)
485 rl = _revlogfrompath(srcrepo, unencoded)
486 revcount += len(rl)
486 revcount += len(rl)
487
487
488 datasize = 0
488 datasize = 0
489 rawsize = 0
489 rawsize = 0
490
490
491 for path in rl.files():
491 for path in rl.files():
492 datasize += rl.opener.stat(path).st_size
492 datasize += rl.opener.stat(path).st_size
493
493
494 idx = rl.index
494 rawsize += sum(map(rl.rawsize, iter(rl)))
495 for rev in rl:
496 e = idx[rev]
497 rawsize += e[2]
498
495
499 srcsize += datasize
496 srcsize += datasize
500 srcrawsize += rawsize
497 srcrawsize += rawsize
501
498
502 # This is for the separate progress bars.
499 # This is for the separate progress bars.
503 if isinstance(rl, changelog.changelog):
500 if isinstance(rl, changelog.changelog):
504 crevcount += len(rl)
501 crevcount += len(rl)
505 csrcsize += datasize
502 csrcsize += datasize
506 crawsize += rawsize
503 crawsize += rawsize
507 elif isinstance(rl, manifest.manifestrevlog):
504 elif isinstance(rl, manifest.manifestrevlog):
508 mcount += 1
505 mcount += 1
509 mrevcount += len(rl)
506 mrevcount += len(rl)
510 msrcsize += datasize
507 msrcsize += datasize
511 mrawsize += rawsize
508 mrawsize += rawsize
512 elif isinstance(rl, filelog.filelog):
509 elif isinstance(rl, filelog.filelog):
513 fcount += 1
510 fcount += 1
514 frevcount += len(rl)
511 frevcount += len(rl)
515 fsrcsize += datasize
512 fsrcsize += datasize
516 frawsize += rawsize
513 frawsize += rawsize
517 else:
514 else:
518 error.ProgrammingError('unknown revlog type')
515 error.ProgrammingError('unknown revlog type')
519
516
520 if not revcount:
517 if not revcount:
521 return
518 return
522
519
523 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
520 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
524 '%d in changelog)\n') %
521 '%d in changelog)\n') %
525 (revcount, frevcount, mrevcount, crevcount))
522 (revcount, frevcount, mrevcount, crevcount))
526 ui.write(_('migrating %s in store; %s tracked data\n') % (
523 ui.write(_('migrating %s in store; %s tracked data\n') % (
527 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
524 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
528
525
529 # Used to keep track of progress.
526 # Used to keep track of progress.
530 progress = None
527 progress = None
531 def oncopiedrevision(rl, rev, node):
528 def oncopiedrevision(rl, rev, node):
532 progress.increment()
529 progress.increment()
533
530
534 # Do the actual copying.
531 # Do the actual copying.
535 # FUTURE this operation can be farmed off to worker processes.
532 # FUTURE this operation can be farmed off to worker processes.
536 seen = set()
533 seen = set()
537 for unencoded, encoded, size in srcrepo.store.walk():
534 for unencoded, encoded, size in srcrepo.store.walk():
538 if unencoded.endswith('.d'):
535 if unencoded.endswith('.d'):
539 continue
536 continue
540
537
541 oldrl = _revlogfrompath(srcrepo, unencoded)
538 oldrl = _revlogfrompath(srcrepo, unencoded)
542 newrl = _revlogfrompath(dstrepo, unencoded)
539 newrl = _revlogfrompath(dstrepo, unencoded)
543
540
544 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
541 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
545 ui.write(_('finished migrating %d manifest revisions across %d '
542 ui.write(_('finished migrating %d manifest revisions across %d '
546 'manifests; change in size: %s\n') %
543 'manifests; change in size: %s\n') %
547 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
544 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
548
545
549 ui.write(_('migrating changelog containing %d revisions '
546 ui.write(_('migrating changelog containing %d revisions '
550 '(%s in store; %s tracked data)\n') %
547 '(%s in store; %s tracked data)\n') %
551 (crevcount, util.bytecount(csrcsize),
548 (crevcount, util.bytecount(csrcsize),
552 util.bytecount(crawsize)))
549 util.bytecount(crawsize)))
553 seen.add('c')
550 seen.add('c')
554 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
551 progress = srcrepo.ui.makeprogress(_('changelog revisions'),
555 total=crevcount)
552 total=crevcount)
556 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
553 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
557 ui.write(_('finished migrating %d filelog revisions across %d '
554 ui.write(_('finished migrating %d filelog revisions across %d '
558 'filelogs; change in size: %s\n') %
555 'filelogs; change in size: %s\n') %
559 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
556 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
560
557
561 ui.write(_('migrating %d manifests containing %d revisions '
558 ui.write(_('migrating %d manifests containing %d revisions '
562 '(%s in store; %s tracked data)\n') %
559 '(%s in store; %s tracked data)\n') %
563 (mcount, mrevcount, util.bytecount(msrcsize),
560 (mcount, mrevcount, util.bytecount(msrcsize),
564 util.bytecount(mrawsize)))
561 util.bytecount(mrawsize)))
565 seen.add('m')
562 seen.add('m')
566 if progress:
563 if progress:
567 progress.complete()
564 progress.complete()
568 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
565 progress = srcrepo.ui.makeprogress(_('manifest revisions'),
569 total=mrevcount)
566 total=mrevcount)
570 elif 'f' not in seen:
567 elif 'f' not in seen:
571 ui.write(_('migrating %d filelogs containing %d revisions '
568 ui.write(_('migrating %d filelogs containing %d revisions '
572 '(%s in store; %s tracked data)\n') %
569 '(%s in store; %s tracked data)\n') %
573 (fcount, frevcount, util.bytecount(fsrcsize),
570 (fcount, frevcount, util.bytecount(fsrcsize),
574 util.bytecount(frawsize)))
571 util.bytecount(frawsize)))
575 seen.add('f')
572 seen.add('f')
576 if progress:
573 if progress:
577 progress.complete()
574 progress.complete()
578 progress = srcrepo.ui.makeprogress(_('file revisions'),
575 progress = srcrepo.ui.makeprogress(_('file revisions'),
579 total=frevcount)
576 total=frevcount)
580
577
581
578
582 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
579 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
583 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
580 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
584 deltareuse=deltareuse,
581 deltareuse=deltareuse,
585 deltabothparents=deltabothparents)
582 deltabothparents=deltabothparents)
586
583
587 datasize = 0
584 datasize = 0
588 for path in newrl.files():
585 for path in newrl.files():
589 datasize += newrl.opener.stat(path).st_size
586 datasize += newrl.opener.stat(path).st_size
590
587
591 dstsize += datasize
588 dstsize += datasize
592
589
593 if isinstance(newrl, changelog.changelog):
590 if isinstance(newrl, changelog.changelog):
594 cdstsize += datasize
591 cdstsize += datasize
595 elif isinstance(newrl, manifest.manifestrevlog):
592 elif isinstance(newrl, manifest.manifestrevlog):
596 mdstsize += datasize
593 mdstsize += datasize
597 else:
594 else:
598 fdstsize += datasize
595 fdstsize += datasize
599
596
600 progress.complete()
597 progress.complete()
601
598
602 ui.write(_('finished migrating %d changelog revisions; change in size: '
599 ui.write(_('finished migrating %d changelog revisions; change in size: '
603 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
600 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
604
601
605 ui.write(_('finished migrating %d total revisions; total change in store '
602 ui.write(_('finished migrating %d total revisions; total change in store '
606 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
603 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
607
604
608 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
605 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
609 """Determine whether to copy a store file during upgrade.
606 """Determine whether to copy a store file during upgrade.
610
607
611 This function is called when migrating store files from ``srcrepo`` to
608 This function is called when migrating store files from ``srcrepo`` to
612 ``dstrepo`` as part of upgrading a repository.
609 ``dstrepo`` as part of upgrading a repository.
613
610
614 Args:
611 Args:
615 srcrepo: repo we are copying from
612 srcrepo: repo we are copying from
616 dstrepo: repo we are copying to
613 dstrepo: repo we are copying to
617 requirements: set of requirements for ``dstrepo``
614 requirements: set of requirements for ``dstrepo``
618 path: store file being examined
615 path: store file being examined
619 mode: the ``ST_MODE`` file type of ``path``
616 mode: the ``ST_MODE`` file type of ``path``
620 st: ``stat`` data structure for ``path``
617 st: ``stat`` data structure for ``path``
621
618
622 Function should return ``True`` if the file is to be copied.
619 Function should return ``True`` if the file is to be copied.
623 """
620 """
624 # Skip revlogs.
621 # Skip revlogs.
625 if path.endswith(('.i', '.d')):
622 if path.endswith(('.i', '.d')):
626 return False
623 return False
627 # Skip transaction related files.
624 # Skip transaction related files.
628 if path.startswith('undo'):
625 if path.startswith('undo'):
629 return False
626 return False
630 # Only copy regular files.
627 # Only copy regular files.
631 if mode != stat.S_IFREG:
628 if mode != stat.S_IFREG:
632 return False
629 return False
633 # Skip other skipped files.
630 # Skip other skipped files.
634 if path in ('lock', 'fncache'):
631 if path in ('lock', 'fncache'):
635 return False
632 return False
636
633
637 return True
634 return True
638
635
639 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
636 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
640 """Hook point for extensions to perform additional actions during upgrade.
637 """Hook point for extensions to perform additional actions during upgrade.
641
638
642 This function is called after revlogs and store files have been copied but
639 This function is called after revlogs and store files have been copied but
643 before the new store is swapped into the original location.
640 before the new store is swapped into the original location.
644 """
641 """
645
642
646 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
643 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
647 """Do the low-level work of upgrading a repository.
644 """Do the low-level work of upgrading a repository.
648
645
649 The upgrade is effectively performed as a copy between a source
646 The upgrade is effectively performed as a copy between a source
650 repository and a temporary destination repository.
647 repository and a temporary destination repository.
651
648
652 The source repository is unmodified for as long as possible so the
649 The source repository is unmodified for as long as possible so the
653 upgrade can abort at any time without causing loss of service for
650 upgrade can abort at any time without causing loss of service for
654 readers and without corrupting the source repository.
651 readers and without corrupting the source repository.
655 """
652 """
656 assert srcrepo.currentwlock()
653 assert srcrepo.currentwlock()
657 assert dstrepo.currentwlock()
654 assert dstrepo.currentwlock()
658
655
659 ui.write(_('(it is safe to interrupt this process any time before '
656 ui.write(_('(it is safe to interrupt this process any time before '
660 'data migration completes)\n'))
657 'data migration completes)\n'))
661
658
662 if 'redeltaall' in actions:
659 if 'redeltaall' in actions:
663 deltareuse = revlog.revlog.DELTAREUSENEVER
660 deltareuse = revlog.revlog.DELTAREUSENEVER
664 elif 'redeltaparent' in actions:
661 elif 'redeltaparent' in actions:
665 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
662 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
666 elif 'redeltamultibase' in actions:
663 elif 'redeltamultibase' in actions:
667 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
664 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
668 elif 'redeltafulladd' in actions:
665 elif 'redeltafulladd' in actions:
669 deltareuse = revlog.revlog.DELTAREUSEFULLADD
666 deltareuse = revlog.revlog.DELTAREUSEFULLADD
670 else:
667 else:
671 deltareuse = revlog.revlog.DELTAREUSEALWAYS
668 deltareuse = revlog.revlog.DELTAREUSEALWAYS
672
669
673 with dstrepo.transaction('upgrade') as tr:
670 with dstrepo.transaction('upgrade') as tr:
674 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
671 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
675 'redeltamultibase' in actions)
672 'redeltamultibase' in actions)
676
673
677 # Now copy other files in the store directory.
674 # Now copy other files in the store directory.
678 # The sorted() makes execution deterministic.
675 # The sorted() makes execution deterministic.
679 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
676 for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
680 if not _filterstorefile(srcrepo, dstrepo, requirements,
677 if not _filterstorefile(srcrepo, dstrepo, requirements,
681 p, kind, st):
678 p, kind, st):
682 continue
679 continue
683
680
684 srcrepo.ui.write(_('copying %s\n') % p)
681 srcrepo.ui.write(_('copying %s\n') % p)
685 src = srcrepo.store.rawvfs.join(p)
682 src = srcrepo.store.rawvfs.join(p)
686 dst = dstrepo.store.rawvfs.join(p)
683 dst = dstrepo.store.rawvfs.join(p)
687 util.copyfile(src, dst, copystat=True)
684 util.copyfile(src, dst, copystat=True)
688
685
689 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
686 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
690
687
691 ui.write(_('data fully migrated to temporary repository\n'))
688 ui.write(_('data fully migrated to temporary repository\n'))
692
689
693 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
690 backuppath = pycompat.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
694 backupvfs = vfsmod.vfs(backuppath)
691 backupvfs = vfsmod.vfs(backuppath)
695
692
696 # Make a backup of requires file first, as it is the first to be modified.
693 # Make a backup of requires file first, as it is the first to be modified.
697 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
694 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
698
695
699 # We install an arbitrary requirement that clients must not support
696 # We install an arbitrary requirement that clients must not support
700 # as a mechanism to lock out new clients during the data swap. This is
697 # as a mechanism to lock out new clients during the data swap. This is
701 # better than allowing a client to continue while the repository is in
698 # better than allowing a client to continue while the repository is in
702 # an inconsistent state.
699 # an inconsistent state.
703 ui.write(_('marking source repository as being upgraded; clients will be '
700 ui.write(_('marking source repository as being upgraded; clients will be '
704 'unable to read from repository\n'))
701 'unable to read from repository\n'))
705 scmutil.writerequires(srcrepo.vfs,
702 scmutil.writerequires(srcrepo.vfs,
706 srcrepo.requirements | {'upgradeinprogress'})
703 srcrepo.requirements | {'upgradeinprogress'})
707
704
708 ui.write(_('starting in-place swap of repository data\n'))
705 ui.write(_('starting in-place swap of repository data\n'))
709 ui.write(_('replaced files will be backed up at %s\n') %
706 ui.write(_('replaced files will be backed up at %s\n') %
710 backuppath)
707 backuppath)
711
708
712 # Now swap in the new store directory. Doing it as a rename should make
709 # Now swap in the new store directory. Doing it as a rename should make
713 # the operation nearly instantaneous and atomic (at least in well-behaved
710 # the operation nearly instantaneous and atomic (at least in well-behaved
714 # environments).
711 # environments).
715 ui.write(_('replacing store...\n'))
712 ui.write(_('replacing store...\n'))
716 tstart = util.timer()
713 tstart = util.timer()
717 util.rename(srcrepo.spath, backupvfs.join('store'))
714 util.rename(srcrepo.spath, backupvfs.join('store'))
718 util.rename(dstrepo.spath, srcrepo.spath)
715 util.rename(dstrepo.spath, srcrepo.spath)
719 elapsed = util.timer() - tstart
716 elapsed = util.timer() - tstart
720 ui.write(_('store replacement complete; repository was inconsistent for '
717 ui.write(_('store replacement complete; repository was inconsistent for '
721 '%0.1fs\n') % elapsed)
718 '%0.1fs\n') % elapsed)
722
719
723 # We first write the requirements file. Any new requirements will lock
720 # We first write the requirements file. Any new requirements will lock
724 # out legacy clients.
721 # out legacy clients.
725 ui.write(_('finalizing requirements file and making repository readable '
722 ui.write(_('finalizing requirements file and making repository readable '
726 'again\n'))
723 'again\n'))
727 scmutil.writerequires(srcrepo.vfs, requirements)
724 scmutil.writerequires(srcrepo.vfs, requirements)
728
725
729 # The lock file from the old store won't be removed because nothing has a
726 # The lock file from the old store won't be removed because nothing has a
730 # reference to its new location. So clean it up manually. Alternatively, we
727 # reference to its new location. So clean it up manually. Alternatively, we
731 # could update srcrepo.svfs and other variables to point to the new
728 # could update srcrepo.svfs and other variables to point to the new
732 # location. This is simpler.
729 # location. This is simpler.
733 backupvfs.unlink('store/lock')
730 backupvfs.unlink('store/lock')
734
731
735 return backuppath
732 return backuppath
736
733
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    Validates that the source repository can be upgraded (required
    requirements present, no blocked or unsupported requirements involved),
    determines which deficiencies/optimizations apply, and either prints a
    dry-run report (``run=False``) or performs the upgrade by staging data
    in a temporary repository and swapping it into place.
    """
    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo.ui)
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (repo.requirements - newreqs -
                    supportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 allowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments: keep the requested, known optimizations
    # and remove them from the requested set so leftovers can be reported.
    optimizations = [o for o in alloptimizations if o.name in optimize]
    optimize.difference_update(o.name for o in optimizations)

    if optimize:  # anything left is unknown
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(optimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(o for o in sorted(optimizations)
                   # determineactions could have added optimisation
                   if o not in actions)

    def printrequirements():
        # Summarize which requirements are kept, dropped, and introduced.
        ui.write(_('requirements\n'))
        ui.write(_(' preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_(' removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_(' added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        # One entry per action that the upgrade will perform.
        for action in actions:
            ui.write('%s\n %s\n\n' % (action.name, action.upgrademessage))

    if not run:
        # Dry-run: report what an actual upgrade would do, then return.
        fromconfig = []
        onlydefault = []

        for deficiency in deficiencies:
            if deficiency.fromconfig(repo):
                fromconfig.append(deficiency)
            elif deficiency.default:
                onlydefault.append(deficiency)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for entry in fromconfig:
                    ui.write('%s\n %s\n\n' % (entry.name, entry.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for entry in onlydefault:
                    ui.write('%s\n %s\n\n' % (entry.name, entry.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [o for o in alloptimizations if o not in actions]

        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for entry in unusedoptimize:
                ui.write(_('%s\n %s\n\n') % (entry.name, entry.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.write(_('repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix='upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.write(_('creating temporary repository to stage migrated '
                       'data: %s\n') % tmppath)

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                          upgradeactions)

        finally:
            ui.write(_('removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

        if backuppath:
            ui.warn(_('copy of old repository backed up at %s\n') %
                    backuppath)
            ui.warn(_('the old repository will not be deleted; remove '
                      'it to free up disk space once the upgraded '
                      'repository is verified\n'))
General Comments 0
You need to be logged in to leave comments. Login now