# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import stat
import tempfile

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    changelog,
    error,
    exchange,
    manifest,
    obsolete,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
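    # Illustrative example (not in the original): if revisions 4-5-6 form a
    # linear run in the set, 6 is a head (no descendant in the set) and 4 is
    # a base (no ancestor in the set).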
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want removed. A new temporary file is created,
    the remaining markers are written there, and on .close() this file gets
    atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n

def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements

def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = upgradesupporteddestrequirements(repo)

    for i in improvements:
        name = i.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        if i.type == deficiency:
            newactions.append(name)

    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    else:
        # Filelogs don't do anything special with settings. So we can use a
        # vanilla revlog.
        return revlog.revlog(repo.svfs, path)

661 | def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas): |
|
662 | def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas): | |
662 | """Copy revlogs between 2 repos.""" |
|
663 | """Copy revlogs between 2 repos.""" | |
663 | revcount = 0 |
|
664 | revcount = 0 | |
664 | srcsize = 0 |
|
665 | srcsize = 0 | |
665 | srcrawsize = 0 |
|
666 | srcrawsize = 0 | |
666 | dstsize = 0 |
|
667 | dstsize = 0 | |
667 | fcount = 0 |
|
668 | fcount = 0 | |
668 | frevcount = 0 |
|
669 | frevcount = 0 | |
669 | fsrcsize = 0 |
|
670 | fsrcsize = 0 | |
670 | frawsize = 0 |
|
671 | frawsize = 0 | |
671 | fdstsize = 0 |
|
672 | fdstsize = 0 | |
672 | mcount = 0 |
|
673 | mcount = 0 | |
673 | mrevcount = 0 |
|
674 | mrevcount = 0 | |
674 | msrcsize = 0 |
|
675 | msrcsize = 0 | |
675 | mrawsize = 0 |
|
676 | mrawsize = 0 | |
676 | mdstsize = 0 |
|
677 | mdstsize = 0 | |
677 | crevcount = 0 |
|
678 | crevcount = 0 | |
678 | csrcsize = 0 |
|
679 | csrcsize = 0 | |
679 | crawsize = 0 |
|
680 | crawsize = 0 | |
680 | cdstsize = 0 |
|
681 | cdstsize = 0 | |
681 |
|
682 | |||
682 | # Perform a pass to collect metadata. This validates we can open all |
|
683 | # Perform a pass to collect metadata. This validates we can open all | |
683 | # source files and allows a unified progress bar to be displayed. |
|
684 | # source files and allows a unified progress bar to be displayed. | |
684 | for unencoded, encoded, size in srcrepo.store.walk(): |
|
685 | for unencoded, encoded, size in srcrepo.store.walk(): | |
685 | if unencoded.endswith('.d'): |
|
686 | if unencoded.endswith('.d'): | |
686 | continue |
|
687 | continue | |
687 |
|
688 | |||
688 | rl = _revlogfrompath(srcrepo, unencoded) |
|
689 | rl = _revlogfrompath(srcrepo, unencoded) | |
689 | revcount += len(rl) |
|
690 | revcount += len(rl) | |
690 |
|
691 | |||
691 | datasize = 0 |
|
692 | datasize = 0 | |
692 | rawsize = 0 |
|
693 | rawsize = 0 | |
693 | idx = rl.index |
|
694 | idx = rl.index | |
694 | for rev in rl: |
|
695 | for rev in rl: | |
695 | e = idx[rev] |
|
696 | e = idx[rev] | |
696 | datasize += e[1] |
|
697 | datasize += e[1] | |
697 | rawsize += e[2] |
|
698 | rawsize += e[2] | |
698 |
|
699 | |||
699 | srcsize += datasize |
|
700 | srcsize += datasize | |
700 | srcrawsize += rawsize |
|
701 | srcrawsize += rawsize | |
701 |
|
702 | |||
702 | # This is for the separate progress bars. |
|
703 | # This is for the separate progress bars. | |
703 | if isinstance(rl, changelog.changelog): |
|
704 | if isinstance(rl, changelog.changelog): | |
704 | crevcount += len(rl) |
|
705 | crevcount += len(rl) | |
705 | csrcsize += datasize |
|
706 | csrcsize += datasize | |
706 | crawsize += rawsize |
|
707 | crawsize += rawsize | |
707 | elif isinstance(rl, manifest.manifestrevlog): |
|
708 | elif isinstance(rl, manifest.manifestrevlog): | |
708 | mcount += 1 |
|
709 | mcount += 1 | |
709 | mrevcount += len(rl) |
|
710 | mrevcount += len(rl) | |
710 | msrcsize += datasize |
|
711 | msrcsize += datasize | |
711 | mrawsize += rawsize |
|
712 | mrawsize += rawsize | |
712 | elif isinstance(rl, revlog.revlog): |
|
713 | elif isinstance(rl, revlog.revlog): | |
713 | fcount += 1 |
|
714 | fcount += 1 | |
714 | frevcount += len(rl) |
|
715 | frevcount += len(rl) | |
715 | fsrcsize += datasize |
|
716 | fsrcsize += datasize | |
716 | frawsize += rawsize |
|
717 | frawsize += rawsize | |
717 |
|
718 | |||
718 | if not revcount: |
|
719 | if not revcount: | |
719 | return |
|
720 | return | |
720 |
|
721 | |||
721 | ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, ' |
|
722 | ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, ' | |
722 | '%d in changelog)\n') % |
|
723 | '%d in changelog)\n') % | |
723 | (revcount, frevcount, mrevcount, crevcount)) |
|
724 | (revcount, frevcount, mrevcount, crevcount)) | |
724 | ui.write(_('migrating %s in store; %s tracked data\n') % ( |
|
725 | ui.write(_('migrating %s in store; %s tracked data\n') % ( | |
725 | (util.bytecount(srcsize), util.bytecount(srcrawsize)))) |
|
726 | (util.bytecount(srcsize), util.bytecount(srcrawsize)))) | |
726 |
|
727 | |||
727 | # Used to keep track of progress. |
|
728 | # Used to keep track of progress. | |
728 | progress = [] |
|
729 | progress = [] | |
729 | def oncopiedrevision(rl, rev, node): |
|
730 | def oncopiedrevision(rl, rev, node): | |
730 | progress[1] += 1 |
|
731 | progress[1] += 1 | |
731 | srcrepo.ui.progress(progress[0], progress[1], total=progress[2]) |
|
732 | srcrepo.ui.progress(progress[0], progress[1], total=progress[2]) | |
732 |
|
733 | |||
733 | # Do the actual copying. |
|
734 | # Do the actual copying. | |
734 | # FUTURE this operation can be farmed off to worker processes. |
|
735 | # FUTURE this operation can be farmed off to worker processes. | |
735 | seen = set() |
|
736 | seen = set() | |
736 | for unencoded, encoded, size in srcrepo.store.walk(): |
|
737 | for unencoded, encoded, size in srcrepo.store.walk(): | |
737 | if unencoded.endswith('.d'): |
|
738 | if unencoded.endswith('.d'): | |
738 | continue |
|
739 | continue | |
739 |
|
740 | |||
740 | oldrl = _revlogfrompath(srcrepo, unencoded) |
|
741 | oldrl = _revlogfrompath(srcrepo, unencoded) | |
741 | newrl = _revlogfrompath(dstrepo, unencoded) |
|
742 | newrl = _revlogfrompath(dstrepo, unencoded) | |
742 |
|
        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

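        # The branches above rely on store.walk() yielding filelogs before
        # manifests before the changelog: the 'finished' message for one
        # phase is emitted when the first revlog of the next type appears.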
        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Returns ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other files that must not be copied.
    if path in ('lock', 'fncache'):
        return False

    return True

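# With the rules above, auxiliary store files such as 'phaseroots' and
# 'obsstore' (when present) pass the filter and are copied verbatim, while
# revlogs are migrated separately by _copyrevlogs and transient files are
# left behind.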
def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

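# A minimal sketch (not part of this change) of how an extension could hook
# the migration step above, using the standard wrapfunction mechanism:
#
#   from mercurial import extensions, repair
#
#   def finishupgrade(orig, ui, srcrepo, dstrepo, requirements):
#       orig(ui, srcrepo, dstrepo, requirements)
#       ui.write('copying extension-specific store files\n')
#
#   extensions.wrapfunction(repair, '_upgradefinishdatamigration',
#                           finishupgrade)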
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

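    # Roughly, from most to least expensive: DELTAREUSENEVER recomputes every
    # delta; DELTAREUSESAMEREVS reuses a delta only when it is computed
    # against the same revisions in the new revlog; DELTAREUSEALWAYS copies
    # existing deltas as-is. 'redeltamultibase' maps to DELTAREUSESAMEREVS
    # because its extra work is enabled via the aggressivemergedeltas flag
    # passed to _copyrevlogs below.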
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

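    # Readers validate .hg/requires against the set of requirements they
    # support and abort on unknown entries, so the transient
    # 'upgradeinprogress' requirement cleanly locks out all clients until
    # the final requirements are written below.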
    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

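    # Past this point the requirement delta is known to be safe: nothing
    # required is missing, no blocked source requirements are present, and
    # every requirement to be removed or added is in the supported sets
    # checked above.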
    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are lighter-weight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the
            # components (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))
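
# Usage sketch, assuming this is wired up to the debug command as in stock
# Mercurial: 'hg debugupgraderepo' prints the planned requirement changes and
# upgrade actions, and 'hg debugupgraderepo --run --optimize redeltaall'
# performs the upgrade while recomputing every delta.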