##// END OF EJS Templates
repair: begin implementation of in-place upgrading...
Gregory Szorc -
r30777:7de7afd8 default
parent child Browse files
Show More
@@ -1,773 +1,859 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import tempfile
13
14
14 from .i18n import _
15 from .i18n import _
15 from .node import short
16 from .node import short
16 from . import (
17 from . import (
17 bundle2,
18 bundle2,
18 changegroup,
19 changegroup,
19 error,
20 error,
20 exchange,
21 exchange,
21 obsolete,
22 obsolete,
23 scmutil,
22 util,
24 util,
23 )
25 )
24
26
25 def _bundle(repo, bases, heads, node, suffix, compress=True):
27 def _bundle(repo, bases, heads, node, suffix, compress=True):
26 """create a bundle with the specified revisions as a backup"""
28 """create a bundle with the specified revisions as a backup"""
27 cgversion = changegroup.safeversion(repo)
29 cgversion = changegroup.safeversion(repo)
28
30
29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
31 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
30 version=cgversion)
32 version=cgversion)
31 backupdir = "strip-backup"
33 backupdir = "strip-backup"
32 vfs = repo.vfs
34 vfs = repo.vfs
33 if not vfs.isdir(backupdir):
35 if not vfs.isdir(backupdir):
34 vfs.mkdir(backupdir)
36 vfs.mkdir(backupdir)
35
37
36 # Include a hash of all the nodes in the filename for uniqueness
38 # Include a hash of all the nodes in the filename for uniqueness
37 allcommits = repo.set('%ln::%ln', bases, heads)
39 allcommits = repo.set('%ln::%ln', bases, heads)
38 allhashes = sorted(c.hex() for c in allcommits)
40 allhashes = sorted(c.hex() for c in allcommits)
39 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
41 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
42 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
41
43
42 comp = None
44 comp = None
43 if cgversion != '01':
45 if cgversion != '01':
44 bundletype = "HG20"
46 bundletype = "HG20"
45 if compress:
47 if compress:
46 comp = 'BZ'
48 comp = 'BZ'
47 elif compress:
49 elif compress:
48 bundletype = "HG10BZ"
50 bundletype = "HG10BZ"
49 else:
51 else:
50 bundletype = "HG10UN"
52 bundletype = "HG10UN"
51 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
53 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
52 compression=comp)
54 compression=comp)
53
55
54 def _collectfiles(repo, striprev):
56 def _collectfiles(repo, striprev):
55 """find out the filelogs affected by the strip"""
57 """find out the filelogs affected by the strip"""
56 files = set()
58 files = set()
57
59
58 for x in xrange(striprev, len(repo)):
60 for x in xrange(striprev, len(repo)):
59 files.update(repo[x].files())
61 files.update(repo[x].files())
60
62
61 return sorted(files)
63 return sorted(files)
62
64
63 def _collectbrokencsets(repo, files, striprev):
65 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
66 """return the changesets which will be broken by the truncation"""
65 s = set()
67 s = set()
66 def collectone(revlog):
68 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
69 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
70 s.update([revlog.linkrev(r) for r in brokenset])
69
71
70 collectone(repo.manifestlog._revlog)
72 collectone(repo.manifestlog._revlog)
71 for fname in files:
73 for fname in files:
72 collectone(repo.file(fname))
74 collectone(repo.file(fname))
73
75
74 return s
76 return s
75
77
76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
78 def strip(ui, repo, nodelist, backup=True, topic='backup'):
77 # This function operates within a transaction of its own, but does
79 # This function operates within a transaction of its own, but does
78 # not take any lock on the repo.
80 # not take any lock on the repo.
79 # Simple way to maintain backwards compatibility for this
81 # Simple way to maintain backwards compatibility for this
80 # argument.
82 # argument.
81 if backup in ['none', 'strip']:
83 if backup in ['none', 'strip']:
82 backup = False
84 backup = False
83
85
84 repo = repo.unfiltered()
86 repo = repo.unfiltered()
85 repo.destroying()
87 repo.destroying()
86
88
87 cl = repo.changelog
89 cl = repo.changelog
88 # TODO handle undo of merge sets
90 # TODO handle undo of merge sets
89 if isinstance(nodelist, str):
91 if isinstance(nodelist, str):
90 nodelist = [nodelist]
92 nodelist = [nodelist]
91 striplist = [cl.rev(node) for node in nodelist]
93 striplist = [cl.rev(node) for node in nodelist]
92 striprev = min(striplist)
94 striprev = min(striplist)
93
95
94 files = _collectfiles(repo, striprev)
96 files = _collectfiles(repo, striprev)
95 saverevs = _collectbrokencsets(repo, files, striprev)
97 saverevs = _collectbrokencsets(repo, files, striprev)
96
98
97 # Some revisions with rev > striprev may not be descendants of striprev.
99 # Some revisions with rev > striprev may not be descendants of striprev.
98 # We have to find these revisions and put them in a bundle, so that
100 # We have to find these revisions and put them in a bundle, so that
99 # we can restore them after the truncations.
101 # we can restore them after the truncations.
100 # To create the bundle we use repo.changegroupsubset which requires
102 # To create the bundle we use repo.changegroupsubset which requires
101 # the list of heads and bases of the set of interesting revisions.
103 # the list of heads and bases of the set of interesting revisions.
102 # (head = revision in the set that has no descendant in the set;
104 # (head = revision in the set that has no descendant in the set;
103 # base = revision in the set that has no ancestor in the set)
105 # base = revision in the set that has no ancestor in the set)
104 tostrip = set(striplist)
106 tostrip = set(striplist)
105 saveheads = set(saverevs)
107 saveheads = set(saverevs)
106 for r in cl.revs(start=striprev + 1):
108 for r in cl.revs(start=striprev + 1):
107 if any(p in tostrip for p in cl.parentrevs(r)):
109 if any(p in tostrip for p in cl.parentrevs(r)):
108 tostrip.add(r)
110 tostrip.add(r)
109
111
110 if r not in tostrip:
112 if r not in tostrip:
111 saverevs.add(r)
113 saverevs.add(r)
112 saveheads.difference_update(cl.parentrevs(r))
114 saveheads.difference_update(cl.parentrevs(r))
113 saveheads.add(r)
115 saveheads.add(r)
114 saveheads = [cl.node(r) for r in saveheads]
116 saveheads = [cl.node(r) for r in saveheads]
115
117
116 # compute base nodes
118 # compute base nodes
117 if saverevs:
119 if saverevs:
118 descendants = set(cl.descendants(saverevs))
120 descendants = set(cl.descendants(saverevs))
119 saverevs.difference_update(descendants)
121 saverevs.difference_update(descendants)
120 savebases = [cl.node(r) for r in saverevs]
122 savebases = [cl.node(r) for r in saverevs]
121 stripbases = [cl.node(r) for r in tostrip]
123 stripbases = [cl.node(r) for r in tostrip]
122
124
123 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
125 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
124 # is much faster
126 # is much faster
125 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
127 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
126 if newbmtarget:
128 if newbmtarget:
127 newbmtarget = repo[newbmtarget.first()].node()
129 newbmtarget = repo[newbmtarget.first()].node()
128 else:
130 else:
129 newbmtarget = '.'
131 newbmtarget = '.'
130
132
131 bm = repo._bookmarks
133 bm = repo._bookmarks
132 updatebm = []
134 updatebm = []
133 for m in bm:
135 for m in bm:
134 rev = repo[bm[m]].rev()
136 rev = repo[bm[m]].rev()
135 if rev in tostrip:
137 if rev in tostrip:
136 updatebm.append(m)
138 updatebm.append(m)
137
139
138 # create a changegroup for all the branches we need to keep
140 # create a changegroup for all the branches we need to keep
139 backupfile = None
141 backupfile = None
140 vfs = repo.vfs
142 vfs = repo.vfs
141 node = nodelist[-1]
143 node = nodelist[-1]
142 if backup:
144 if backup:
143 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
145 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
144 repo.ui.status(_("saved backup bundle to %s\n") %
146 repo.ui.status(_("saved backup bundle to %s\n") %
145 vfs.join(backupfile))
147 vfs.join(backupfile))
146 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
148 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
147 vfs.join(backupfile))
149 vfs.join(backupfile))
148 tmpbundlefile = None
150 tmpbundlefile = None
149 if saveheads:
151 if saveheads:
150 # do not compress temporary bundle if we remove it from disk later
152 # do not compress temporary bundle if we remove it from disk later
151 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
153 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
152 compress=False)
154 compress=False)
153
155
154 mfst = repo.manifestlog._revlog
156 mfst = repo.manifestlog._revlog
155
157
156 curtr = repo.currenttransaction()
158 curtr = repo.currenttransaction()
157 if curtr is not None:
159 if curtr is not None:
158 del curtr # avoid carrying reference to transaction for nothing
160 del curtr # avoid carrying reference to transaction for nothing
159 msg = _('programming error: cannot strip from inside a transaction')
161 msg = _('programming error: cannot strip from inside a transaction')
160 raise error.Abort(msg, hint=_('contact your extension maintainer'))
162 raise error.Abort(msg, hint=_('contact your extension maintainer'))
161
163
162 try:
164 try:
163 with repo.transaction("strip") as tr:
165 with repo.transaction("strip") as tr:
164 offset = len(tr.entries)
166 offset = len(tr.entries)
165
167
166 tr.startgroup()
168 tr.startgroup()
167 cl.strip(striprev, tr)
169 cl.strip(striprev, tr)
168 mfst.strip(striprev, tr)
170 mfst.strip(striprev, tr)
169 if 'treemanifest' in repo.requirements: # safe but unnecessary
171 if 'treemanifest' in repo.requirements: # safe but unnecessary
170 # otherwise
172 # otherwise
171 for unencoded, encoded, size in repo.store.datafiles():
173 for unencoded, encoded, size in repo.store.datafiles():
172 if (unencoded.startswith('meta/') and
174 if (unencoded.startswith('meta/') and
173 unencoded.endswith('00manifest.i')):
175 unencoded.endswith('00manifest.i')):
174 dir = unencoded[5:-12]
176 dir = unencoded[5:-12]
175 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
177 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
176 for fn in files:
178 for fn in files:
177 repo.file(fn).strip(striprev, tr)
179 repo.file(fn).strip(striprev, tr)
178 tr.endgroup()
180 tr.endgroup()
179
181
180 for i in xrange(offset, len(tr.entries)):
182 for i in xrange(offset, len(tr.entries)):
181 file, troffset, ignore = tr.entries[i]
183 file, troffset, ignore = tr.entries[i]
182 with repo.svfs(file, 'a', checkambig=True) as fp:
184 with repo.svfs(file, 'a', checkambig=True) as fp:
183 fp.truncate(troffset)
185 fp.truncate(troffset)
184 if troffset == 0:
186 if troffset == 0:
185 repo.store.markremoved(file)
187 repo.store.markremoved(file)
186
188
187 if tmpbundlefile:
189 if tmpbundlefile:
188 ui.note(_("adding branch\n"))
190 ui.note(_("adding branch\n"))
189 f = vfs.open(tmpbundlefile, "rb")
191 f = vfs.open(tmpbundlefile, "rb")
190 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
192 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
191 if not repo.ui.verbose:
193 if not repo.ui.verbose:
192 # silence internal shuffling chatter
194 # silence internal shuffling chatter
193 repo.ui.pushbuffer()
195 repo.ui.pushbuffer()
194 if isinstance(gen, bundle2.unbundle20):
196 if isinstance(gen, bundle2.unbundle20):
195 with repo.transaction('strip') as tr:
197 with repo.transaction('strip') as tr:
196 tr.hookargs = {'source': 'strip',
198 tr.hookargs = {'source': 'strip',
197 'url': 'bundle:' + vfs.join(tmpbundlefile)}
199 'url': 'bundle:' + vfs.join(tmpbundlefile)}
198 bundle2.applybundle(repo, gen, tr, source='strip',
200 bundle2.applybundle(repo, gen, tr, source='strip',
199 url='bundle:' + vfs.join(tmpbundlefile))
201 url='bundle:' + vfs.join(tmpbundlefile))
200 else:
202 else:
201 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
203 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
202 True)
204 True)
203 if not repo.ui.verbose:
205 if not repo.ui.verbose:
204 repo.ui.popbuffer()
206 repo.ui.popbuffer()
205 f.close()
207 f.close()
206 repo._phasecache.invalidate()
208 repo._phasecache.invalidate()
207
209
208 for m in updatebm:
210 for m in updatebm:
209 bm[m] = repo[newbmtarget].node()
211 bm[m] = repo[newbmtarget].node()
210 lock = tr = None
212 lock = tr = None
211 try:
213 try:
212 lock = repo.lock()
214 lock = repo.lock()
213 tr = repo.transaction('repair')
215 tr = repo.transaction('repair')
214 bm.recordchange(tr)
216 bm.recordchange(tr)
215 tr.close()
217 tr.close()
216 finally:
218 finally:
217 tr.release()
219 tr.release()
218 lock.release()
220 lock.release()
219
221
220 # remove undo files
222 # remove undo files
221 for undovfs, undofile in repo.undofiles():
223 for undovfs, undofile in repo.undofiles():
222 try:
224 try:
223 undovfs.unlink(undofile)
225 undovfs.unlink(undofile)
224 except OSError as e:
226 except OSError as e:
225 if e.errno != errno.ENOENT:
227 if e.errno != errno.ENOENT:
226 ui.warn(_('error removing %s: %s\n') %
228 ui.warn(_('error removing %s: %s\n') %
227 (undovfs.join(undofile), str(e)))
229 (undovfs.join(undofile), str(e)))
228
230
229 except: # re-raises
231 except: # re-raises
230 if backupfile:
232 if backupfile:
231 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
232 % vfs.join(backupfile))
234 % vfs.join(backupfile))
233 if tmpbundlefile:
235 if tmpbundlefile:
234 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
235 % vfs.join(tmpbundlefile))
237 % vfs.join(tmpbundlefile))
236 ui.warn(_("(fix the problem, then recover the changesets with "
238 ui.warn(_("(fix the problem, then recover the changesets with "
237 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
238 raise
240 raise
239 else:
241 else:
240 if tmpbundlefile:
242 if tmpbundlefile:
241 # Remove temporary bundle only if there were no exceptions
243 # Remove temporary bundle only if there were no exceptions
242 vfs.unlink(tmpbundlefile)
244 vfs.unlink(tmpbundlefile)
243
245
244 repo.destroyed()
246 repo.destroyed()
245 # return the backup file path (or None if 'backup' was False) so
247 # return the backup file path (or None if 'backup' was False) so
246 # extensions can use it
248 # extensions can use it
247 return backupfile
249 return backupfile
248
250
249 def rebuildfncache(ui, repo):
251 def rebuildfncache(ui, repo):
250 """Rebuilds the fncache file from repo history.
252 """Rebuilds the fncache file from repo history.
251
253
252 Missing entries will be added. Extra entries will be removed.
254 Missing entries will be added. Extra entries will be removed.
253 """
255 """
254 repo = repo.unfiltered()
256 repo = repo.unfiltered()
255
257
256 if 'fncache' not in repo.requirements:
258 if 'fncache' not in repo.requirements:
257 ui.warn(_('(not rebuilding fncache because repository does not '
259 ui.warn(_('(not rebuilding fncache because repository does not '
258 'support fncache)\n'))
260 'support fncache)\n'))
259 return
261 return
260
262
261 with repo.lock():
263 with repo.lock():
262 fnc = repo.store.fncache
264 fnc = repo.store.fncache
263 # Trigger load of fncache.
265 # Trigger load of fncache.
264 if 'irrelevant' in fnc:
266 if 'irrelevant' in fnc:
265 pass
267 pass
266
268
267 oldentries = set(fnc.entries)
269 oldentries = set(fnc.entries)
268 newentries = set()
270 newentries = set()
269 seenfiles = set()
271 seenfiles = set()
270
272
271 repolen = len(repo)
273 repolen = len(repo)
272 for rev in repo:
274 for rev in repo:
273 ui.progress(_('rebuilding'), rev, total=repolen,
275 ui.progress(_('rebuilding'), rev, total=repolen,
274 unit=_('changesets'))
276 unit=_('changesets'))
275
277
276 ctx = repo[rev]
278 ctx = repo[rev]
277 for f in ctx.files():
279 for f in ctx.files():
278 # This is to minimize I/O.
280 # This is to minimize I/O.
279 if f in seenfiles:
281 if f in seenfiles:
280 continue
282 continue
281 seenfiles.add(f)
283 seenfiles.add(f)
282
284
283 i = 'data/%s.i' % f
285 i = 'data/%s.i' % f
284 d = 'data/%s.d' % f
286 d = 'data/%s.d' % f
285
287
286 if repo.store._exists(i):
288 if repo.store._exists(i):
287 newentries.add(i)
289 newentries.add(i)
288 if repo.store._exists(d):
290 if repo.store._exists(d):
289 newentries.add(d)
291 newentries.add(d)
290
292
291 ui.progress(_('rebuilding'), None)
293 ui.progress(_('rebuilding'), None)
292
294
293 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
294 for dir in util.dirs(seenfiles):
296 for dir in util.dirs(seenfiles):
295 i = 'meta/%s/00manifest.i' % dir
297 i = 'meta/%s/00manifest.i' % dir
296 d = 'meta/%s/00manifest.d' % dir
298 d = 'meta/%s/00manifest.d' % dir
297
299
298 if repo.store._exists(i):
300 if repo.store._exists(i):
299 newentries.add(i)
301 newentries.add(i)
300 if repo.store._exists(d):
302 if repo.store._exists(d):
301 newentries.add(d)
303 newentries.add(d)
302
304
303 addcount = len(newentries - oldentries)
305 addcount = len(newentries - oldentries)
304 removecount = len(oldentries - newentries)
306 removecount = len(oldentries - newentries)
305 for p in sorted(oldentries - newentries):
307 for p in sorted(oldentries - newentries):
306 ui.write(_('removing %s\n') % p)
308 ui.write(_('removing %s\n') % p)
307 for p in sorted(newentries - oldentries):
309 for p in sorted(newentries - oldentries):
308 ui.write(_('adding %s\n') % p)
310 ui.write(_('adding %s\n') % p)
309
311
310 if addcount or removecount:
312 if addcount or removecount:
311 ui.write(_('%d items added, %d removed from fncache\n') %
313 ui.write(_('%d items added, %d removed from fncache\n') %
312 (addcount, removecount))
314 (addcount, removecount))
313 fnc.entries = newentries
315 fnc.entries = newentries
314 fnc._dirty = True
316 fnc._dirty = True
315
317
316 with repo.transaction('fncache') as tr:
318 with repo.transaction('fncache') as tr:
317 fnc.write(tr)
319 fnc.write(tr)
318 else:
320 else:
319 ui.write(_('fncache already up to date\n'))
321 ui.write(_('fncache already up to date\n'))
320
322
321 def stripbmrevset(repo, mark):
323 def stripbmrevset(repo, mark):
322 """
324 """
323 The revset to strip when strip is called with -B mark
325 The revset to strip when strip is called with -B mark
324
326
325 Needs to live here so extensions can use it and wrap it even when strip is
327 Needs to live here so extensions can use it and wrap it even when strip is
326 not enabled or not present on a box.
328 not enabled or not present on a box.
327 """
329 """
328 return repo.revs("ancestors(bookmark(%s)) - "
330 return repo.revs("ancestors(bookmark(%s)) - "
329 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(head() and not bookmark(%s)) - "
330 "ancestors(bookmark() and not bookmark(%s))",
332 "ancestors(bookmark() and not bookmark(%s))",
331 mark, mark, mark)
333 mark, mark, mark)
332
334
333 def deleteobsmarkers(obsstore, indices):
335 def deleteobsmarkers(obsstore, indices):
334 """Delete some obsmarkers from obsstore and return how many were deleted
336 """Delete some obsmarkers from obsstore and return how many were deleted
335
337
336 'indices' is a list of ints which are the indices
338 'indices' is a list of ints which are the indices
337 of the markers to be deleted.
339 of the markers to be deleted.
338
340
339 Every invocation of this function completely rewrites the obsstore file,
341 Every invocation of this function completely rewrites the obsstore file,
340 skipping the markers we want to be removed. The new temporary file is
342 skipping the markers we want to be removed. The new temporary file is
341 created, remaining markers are written there and on .close() this file
343 created, remaining markers are written there and on .close() this file
342 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 gets atomically renamed to obsstore, thus guaranteeing consistency."""
343 if not indices:
345 if not indices:
344 # we don't want to rewrite the obsstore with the same content
346 # we don't want to rewrite the obsstore with the same content
345 return
347 return
346
348
347 left = []
349 left = []
348 current = obsstore._all
350 current = obsstore._all
349 n = 0
351 n = 0
350 for i, m in enumerate(current):
352 for i, m in enumerate(current):
351 if i in indices:
353 if i in indices:
352 n += 1
354 n += 1
353 continue
355 continue
354 left.append(m)
356 left.append(m)
355
357
356 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
357 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
358 newobsstorefile.write(bytes)
360 newobsstorefile.write(bytes)
359 newobsstorefile.close()
361 newobsstorefile.close()
360 return n
362 return n
361
363
362 def upgraderequiredsourcerequirements(repo):
364 def upgraderequiredsourcerequirements(repo):
363 """Obtain requirements required to be present to upgrade a repo.
365 """Obtain requirements required to be present to upgrade a repo.
364
366
365 An upgrade will not be allowed if the repository doesn't have the
367 An upgrade will not be allowed if the repository doesn't have the
366 requirements returned by this function.
368 requirements returned by this function.
367 """
369 """
368 return set([
370 return set([
369 # Introduced in Mercurial 0.9.2.
371 # Introduced in Mercurial 0.9.2.
370 'revlogv1',
372 'revlogv1',
371 # Introduced in Mercurial 0.9.2.
373 # Introduced in Mercurial 0.9.2.
372 'store',
374 'store',
373 ])
375 ])
374
376
375 def upgradeblocksourcerequirements(repo):
377 def upgradeblocksourcerequirements(repo):
376 """Obtain requirements that will prevent an upgrade from occurring.
378 """Obtain requirements that will prevent an upgrade from occurring.
377
379
378 An upgrade cannot be performed if the source repository contains a
380 An upgrade cannot be performed if the source repository contains a
379 requirements in the returned set.
381 requirements in the returned set.
380 """
382 """
381 return set([
383 return set([
382 # The upgrade code does not yet support these experimental features.
384 # The upgrade code does not yet support these experimental features.
383 # This is an artificial limitation.
385 # This is an artificial limitation.
384 'manifestv2',
386 'manifestv2',
385 'treemanifest',
387 'treemanifest',
386 # This was a precursor to generaldelta and was never enabled by default.
388 # This was a precursor to generaldelta and was never enabled by default.
387 # It should (hopefully) not exist in the wild.
389 # It should (hopefully) not exist in the wild.
388 'parentdelta',
390 'parentdelta',
389 # Upgrade should operate on the actual store, not the shared link.
391 # Upgrade should operate on the actual store, not the shared link.
390 'shared',
392 'shared',
391 ])
393 ])
392
394
393 def upgradesupportremovedrequirements(repo):
395 def upgradesupportremovedrequirements(repo):
394 """Obtain requirements that can be removed during an upgrade.
396 """Obtain requirements that can be removed during an upgrade.
395
397
396 If an upgrade were to create a repository that dropped a requirement,
398 If an upgrade were to create a repository that dropped a requirement,
397 the dropped requirement must appear in the returned set for the upgrade
399 the dropped requirement must appear in the returned set for the upgrade
398 to be allowed.
400 to be allowed.
399 """
401 """
400 return set()
402 return set()
401
403
402 def upgradesupporteddestrequirements(repo):
404 def upgradesupporteddestrequirements(repo):
403 """Obtain requirements that upgrade supports in the destination.
405 """Obtain requirements that upgrade supports in the destination.
404
406
405 If the result of the upgrade would create requirements not in this set,
407 If the result of the upgrade would create requirements not in this set,
406 the upgrade is disallowed.
408 the upgrade is disallowed.
407
409
408 Extensions should monkeypatch this to add their custom requirements.
410 Extensions should monkeypatch this to add their custom requirements.
409 """
411 """
410 return set([
412 return set([
411 'dotencode',
413 'dotencode',
412 'fncache',
414 'fncache',
413 'generaldelta',
415 'generaldelta',
414 'revlogv1',
416 'revlogv1',
415 'store',
417 'store',
416 ])
418 ])
417
419
418 def upgradeallowednewrequirements(repo):
420 def upgradeallowednewrequirements(repo):
419 """Obtain requirements that can be added to a repository during upgrade.
421 """Obtain requirements that can be added to a repository during upgrade.
420
422
421 This is used to disallow proposed requirements from being added when
423 This is used to disallow proposed requirements from being added when
422 they weren't present before.
424 they weren't present before.
423
425
424 We use a list of allowed requirement additions instead of a list of known
426 We use a list of allowed requirement additions instead of a list of known
425 bad additions because the whitelist approach is safer and will prevent
427 bad additions because the whitelist approach is safer and will prevent
426 future, unknown requirements from accidentally being added.
428 future, unknown requirements from accidentally being added.
427 """
429 """
428 return set([
430 return set([
429 'dotencode',
431 'dotencode',
430 'fncache',
432 'fncache',
431 'generaldelta',
433 'generaldelta',
432 ])
434 ])
433
435
434 deficiency = 'deficiency'
436 deficiency = 'deficiency'
435 optimisation = 'optimization'
437 optimisation = 'optimization'
436
438
437 class upgradeimprovement(object):
439 class upgradeimprovement(object):
438 """Represents an improvement that can be made as part of an upgrade.
440 """Represents an improvement that can be made as part of an upgrade.
439
441
440 The following attributes are defined on each instance:
442 The following attributes are defined on each instance:
441
443
442 name
444 name
443 Machine-readable string uniquely identifying this improvement. It
445 Machine-readable string uniquely identifying this improvement. It
444 will be mapped to an action later in the upgrade process.
446 will be mapped to an action later in the upgrade process.
445
447
446 type
448 type
447 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
449 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
448 problem. An optimization is an action (sometimes optional) that
450 problem. An optimization is an action (sometimes optional) that
449 can be taken to further improve the state of the repository.
451 can be taken to further improve the state of the repository.
450
452
451 description
453 description
452 Message intended for humans explaining the improvement in more detail,
454 Message intended for humans explaining the improvement in more detail,
453 including the implications of it. For ``deficiency`` types, should be
455 including the implications of it. For ``deficiency`` types, should be
454 worded in the present tense. For ``optimisation`` types, should be
456 worded in the present tense. For ``optimisation`` types, should be
455 worded in the future tense.
457 worded in the future tense.
456
458
457 upgrademessage
459 upgrademessage
458 Message intended for humans explaining what an upgrade addressing this
460 Message intended for humans explaining what an upgrade addressing this
459 issue will do. Should be worded in the future tense.
461 issue will do. Should be worded in the future tense.
460
462
461 fromdefault (``deficiency`` types only)
463 fromdefault (``deficiency`` types only)
462 Boolean indicating whether the current (deficient) state deviates
464 Boolean indicating whether the current (deficient) state deviates
463 from Mercurial's default configuration.
465 from Mercurial's default configuration.
464
466
465 fromconfig (``deficiency`` types only)
467 fromconfig (``deficiency`` types only)
466 Boolean indicating whether the current (deficient) state deviates
468 Boolean indicating whether the current (deficient) state deviates
467 from the current Mercurial configuration.
469 from the current Mercurial configuration.
468 """
470 """
469 def __init__(self, name, type, description, upgrademessage, **kwargs):
471 def __init__(self, name, type, description, upgrademessage, **kwargs):
470 self.name = name
472 self.name = name
471 self.type = type
473 self.type = type
472 self.description = description
474 self.description = description
473 self.upgrademessage = upgrademessage
475 self.upgrademessage = upgrademessage
474
476
475 for k, v in kwargs.items():
477 for k, v in kwargs.items():
476 setattr(self, k, v)
478 setattr(self, k, v)
477
479
def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.

    Entries of type ``deficiency`` describe features the repository lacks
    (they are only emitted when the corresponding requirement is absent);
    entries of type ``optimisation`` are always emitted and are filtered
    later by ``upgradedetermineactions``.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    # Requirements a freshly-created repo would get under the current config;
    # used to decide whether a missing feature is recommended by config
    # (``fromconfig``) or only by Mercurial's defaults (``fromdefault``).
    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            # One delta-chained entry is enough to schedule the fix, so stop
            # scanning as soon as the first is found.
            # NOTE(review): "reformated" (sic) below is a typo in a
            # user-visible string; the test suite pins this exact text, so
            # fixing it requires updating the tests in the same change.
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformated to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements
604
606
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    supporteddest = upgradesupporteddestrequirements(repo)

    actions = []
    for improvement in improvements:
        actionname = improvement.name

        # A requirement-backed action that the destination repo won't carry
        # is pointless, so prune it.
        if actionname in supporteddest and actionname not in destreqs:
            continue

        # Every detected deficiency is scheduled for correction.
        if improvement.type == deficiency:
            actions.append(actionname)

    # Explicitly requested optimizations follow, sorted by name; skip any
    # that are already scheduled.
    for optname in sorted(optimize):
        if optname not in actions:
            actions.append(optname)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return actions
639
641
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    ``requirements`` is the set written as the source repo's final
    requirements. ``actions`` is accepted but not yet consumed by this
    implementation (the store copy it will drive is still a TODO).

    Returns the path of the backup directory holding replaced files.
    """
    # Both repos must already be write-locked by the caller; this function
    # only asserts that invariant rather than taking locks itself.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    # TODO copy store

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = scmutil.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # TODO do the store swap here.

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    return backuppath
685
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place.

    With ``run=False`` (the default), only report the deficiencies found
    and the actions an upgrade would perform. With ``run=True``, perform
    the upgrade under the repository's wlock and lock.

    ``optimize`` is an optional iterable of optimization action names to
    schedule in addition to automatic deficiency fixes; unknown names
    abort.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    # Operate on the unfiltered repo so hidden changesets are included.
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    # Refuse to drop any requirement the upgrade machinery does not know
    # how to remove.
    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    # And refuse to add any requirement not explicitly whitelisted.
    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    # Print the preserved/removed/added requirement summary shown in both
    # dry-run and run modes.
    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    # Print each scheduled action's upgrade message, preserving the order
    # of ``actions``.
    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        # Dry-run mode: describe deficiencies, scheduled actions, and
        # available-but-unscheduled optimizations, then return.
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            # A deficiency may be flagged by both the defaults and the
            # current config; report it only once, under the config list.
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                # The temporary staging repo is always removed, even on
                # failure; the backup directory (if created) is kept and
                # its removal is left to the user.
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))
@@ -1,182 +1,255 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [extensions]
2 > [extensions]
3 > share =
3 > share =
4 > EOF
4 > EOF
5
5
6 store and revlogv1 are required in source
6 store and revlogv1 are required in source
7
7
8 $ hg --config format.usestore=false init no-store
8 $ hg --config format.usestore=false init no-store
9 $ hg -R no-store debugupgraderepo
9 $ hg -R no-store debugupgraderepo
10 abort: cannot upgrade repository; requirement missing: store
10 abort: cannot upgrade repository; requirement missing: store
11 [255]
11 [255]
12
12
13 $ hg init no-revlogv1
13 $ hg init no-revlogv1
14 $ cat > no-revlogv1/.hg/requires << EOF
14 $ cat > no-revlogv1/.hg/requires << EOF
15 > dotencode
15 > dotencode
16 > fncache
16 > fncache
17 > generaldelta
17 > generaldelta
18 > store
18 > store
19 > EOF
19 > EOF
20
20
21 $ hg -R no-revlogv1 debugupgraderepo
21 $ hg -R no-revlogv1 debugupgraderepo
22 abort: cannot upgrade repository; requirement missing: revlogv1
22 abort: cannot upgrade repository; requirement missing: revlogv1
23 [255]
23 [255]
24
24
25 Cannot upgrade shared repositories
25 Cannot upgrade shared repositories
26
26
27 $ hg init share-parent
27 $ hg init share-parent
28 $ hg -q share share-parent share-child
28 $ hg -q share share-parent share-child
29
29
30 $ hg -R share-child debugupgraderepo
30 $ hg -R share-child debugupgraderepo
31 abort: cannot upgrade repository; unsupported source requirement: shared
31 abort: cannot upgrade repository; unsupported source requirement: shared
32 [255]
32 [255]
33
33
34 Do not yet support upgrading manifestv2 and treemanifest repos
34 Do not yet support upgrading manifestv2 and treemanifest repos
35
35
36 $ hg --config experimental.manifestv2=true init manifestv2
36 $ hg --config experimental.manifestv2=true init manifestv2
37 $ hg -R manifestv2 debugupgraderepo
37 $ hg -R manifestv2 debugupgraderepo
38 abort: cannot upgrade repository; unsupported source requirement: manifestv2
38 abort: cannot upgrade repository; unsupported source requirement: manifestv2
39 [255]
39 [255]
40
40
41 $ hg --config experimental.treemanifest=true init treemanifest
41 $ hg --config experimental.treemanifest=true init treemanifest
42 $ hg -R treemanifest debugupgraderepo
42 $ hg -R treemanifest debugupgraderepo
43 abort: cannot upgrade repository; unsupported source requirement: treemanifest
43 abort: cannot upgrade repository; unsupported source requirement: treemanifest
44 [255]
44 [255]
45
45
46 Cannot add manifestv2 or treemanifest requirement during upgrade
46 Cannot add manifestv2 or treemanifest requirement during upgrade
47
47
48 $ hg init disallowaddedreq
48 $ hg init disallowaddedreq
49 $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
49 $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
50 abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
50 abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
51 [255]
51 [255]
52
52
53 An upgrade of a repository created with recommended settings only suggests optimizations
53 An upgrade of a repository created with recommended settings only suggests optimizations
54
54
55 $ hg init empty
55 $ hg init empty
56 $ cd empty
56 $ cd empty
57 $ hg debugupgraderepo
57 $ hg debugupgraderepo
58 (no feature deficiencies found in existing repository)
58 (no feature deficiencies found in existing repository)
59 performing an upgrade with "--run" will make the following changes:
59 performing an upgrade with "--run" will make the following changes:
60
60
61 requirements
61 requirements
62 preserved: dotencode, fncache, generaldelta, revlogv1, store
62 preserved: dotencode, fncache, generaldelta, revlogv1, store
63
63
64 additional optimizations are available by specifying "--optimize <name>":
64 additional optimizations are available by specifying "--optimize <name>":
65
65
66 redeltaparent
66 redeltaparent
67 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
67 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
68
68
69 redeltamultibase
69 redeltamultibase
70 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
70 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
71
71
72 redeltaall
72 redeltaall
73 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
73 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
74
74
75
75
76 --optimize can be used to add optimizations
76 --optimize can be used to add optimizations
77
77
78 $ hg debugupgrade --optimize redeltaparent
78 $ hg debugupgrade --optimize redeltaparent
79 (no feature deficiencies found in existing repository)
79 (no feature deficiencies found in existing repository)
80 performing an upgrade with "--run" will make the following changes:
80 performing an upgrade with "--run" will make the following changes:
81
81
82 requirements
82 requirements
83 preserved: dotencode, fncache, generaldelta, revlogv1, store
83 preserved: dotencode, fncache, generaldelta, revlogv1, store
84
84
85 redeltaparent
85 redeltaparent
86 deltas within internal storage will choose a new base revision if needed
86 deltas within internal storage will choose a new base revision if needed
87
87
88 additional optimizations are available by specifying "--optimize <name>":
88 additional optimizations are available by specifying "--optimize <name>":
89
89
90 redeltamultibase
90 redeltamultibase
91 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
91 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
92
92
93 redeltaall
93 redeltaall
94 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
94 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
95
95
96
96
97 Various sub-optimal detections work
97 Various sub-optimal detections work
98
98
99 $ cat > .hg/requires << EOF
99 $ cat > .hg/requires << EOF
100 > revlogv1
100 > revlogv1
101 > store
101 > store
102 > EOF
102 > EOF
103
103
104 $ hg debugupgraderepo
104 $ hg debugupgraderepo
105 repository lacks features recommended by current config options:
105 repository lacks features recommended by current config options:
106
106
107 fncache
107 fncache
108 long and reserved filenames may not work correctly; repository performance is sub-optimal
108 long and reserved filenames may not work correctly; repository performance is sub-optimal
109
109
110 dotencode
110 dotencode
111 storage of filenames beginning with a period or space may not work correctly
111 storage of filenames beginning with a period or space may not work correctly
112
112
113 generaldelta
113 generaldelta
114 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
114 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
115
115
116
116
117 performing an upgrade with "--run" will make the following changes:
117 performing an upgrade with "--run" will make the following changes:
118
118
119 requirements
119 requirements
120 preserved: revlogv1, store
120 preserved: revlogv1, store
121 added: dotencode, fncache, generaldelta
121 added: dotencode, fncache, generaldelta
122
122
123 fncache
123 fncache
124 repository will be more resilient to storing certain paths and performance of certain operations should be improved
124 repository will be more resilient to storing certain paths and performance of certain operations should be improved
125
125
126 dotencode
126 dotencode
127 repository will be better able to store files beginning with a space or period
127 repository will be better able to store files beginning with a space or period
128
128
129 generaldelta
129 generaldelta
130 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
130 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
131
131
132 additional optimizations are available by specifying "--optimize <name>":
132 additional optimizations are available by specifying "--optimize <name>":
133
133
134 redeltaparent
134 redeltaparent
135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
135 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
136
136
137 redeltamultibase
137 redeltamultibase
138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
138 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
139
139
140 redeltaall
140 redeltaall
141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
141 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
142
142
143
143
144 $ hg --config format.dotencode=false debugupgraderepo
144 $ hg --config format.dotencode=false debugupgraderepo
145 repository lacks features recommended by current config options:
145 repository lacks features recommended by current config options:
146
146
147 fncache
147 fncache
148 long and reserved filenames may not work correctly; repository performance is sub-optimal
148 long and reserved filenames may not work correctly; repository performance is sub-optimal
149
149
150 generaldelta
150 generaldelta
151 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
151 deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
152
152
153 repository lacks features used by the default config options:
153 repository lacks features used by the default config options:
154
154
155 dotencode
155 dotencode
156 storage of filenames beginning with a period or space may not work correctly
156 storage of filenames beginning with a period or space may not work correctly
157
157
158
158
159 performing an upgrade with "--run" will make the following changes:
159 performing an upgrade with "--run" will make the following changes:
160
160
161 requirements
161 requirements
162 preserved: revlogv1, store
162 preserved: revlogv1, store
163 added: fncache, generaldelta
163 added: fncache, generaldelta
164
164
165 fncache
165 fncache
166 repository will be more resilient to storing certain paths and performance of certain operations should be improved
166 repository will be more resilient to storing certain paths and performance of certain operations should be improved
167
167
168 generaldelta
168 generaldelta
169 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
169 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
170
170
171 additional optimizations are available by specifying "--optimize <name>":
171 additional optimizations are available by specifying "--optimize <name>":
172
172
173 redeltaparent
173 redeltaparent
174 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
174 deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
175
175
176 redeltamultibase
176 redeltamultibase
177 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
177 deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
178
178
179 redeltaall
179 redeltaall
180 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
180 deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
181
181
182
182
183 $ cd ..
184
185 Upgrading a repository that is already modern essentially no-ops
186
187 $ hg init modern
188 $ hg -R modern debugupgraderepo --run
189 upgrade will perform the following actions:
190
191 requirements
192 preserved: dotencode, fncache, generaldelta, revlogv1, store
193
194 beginning upgrade...
195 repository locked and read-only
196 creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
197 marking source repository as being upgraded; clients will be unable to read from repository
198 starting in-place swap of repository data
199 replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
200 finalizing requirements file and making repository readable again
201 removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
202 copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
203 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
204
205 Upgrading a repository to generaldelta works
206
207 $ hg --config format.usegeneraldelta=false init upgradegd
208 $ cd upgradegd
209 $ touch f0
210 $ hg -q commit -A -m initial
211 $ touch f1
212 $ hg -q commit -A -m 'add f1'
213 $ hg -q up -r 0
214 $ touch f2
215 $ hg -q commit -A -m 'add f2'
216
217 $ hg debugupgraderepo --run
218 upgrade will perform the following actions:
219
220 requirements
221 preserved: dotencode, fncache, revlogv1, store
222 added: generaldelta
223
224 generaldelta
225 repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
226
227 beginning upgrade...
228 repository locked and read-only
229 creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
230 marking source repository as being upgraded; clients will be unable to read from repository
231 starting in-place swap of repository data
232 replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
233 finalizing requirements file and making repository readable again
234 removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
235 copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
236 the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
237
238 Original requirements backed up
239
240 $ cat .hg/upgradebackup.*/requires
241 dotencode
242 fncache
243 revlogv1
244 store
245
246 generaldelta added to original requirements files
247
248 $ cat .hg/requires
249 dotencode
250 fncache
251 generaldelta
252 revlogv1
253 store
254
255 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now