repair: migrate revlogs during upgrade...
Gregory Szorc
r30779:38aa1ca9 default
@@ -1,859 +1,1046 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import tempfile
+import time

 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
+    changelog,
     error,
     exchange,
+    manifest,
     obsolete,
+    revlog,
     scmutil,
     util,
 )

 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = changegroup.safeversion(repo)

     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                                compression=comp)
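
# Illustrative sketch (not part of this change): how _bundle() above derives
# the backup filename. The node values are made up; Mercurial's short() yields
# the first 12 hex digits of a node.
import hashlib

nodes = ['a' * 40, 'b' * 40]  # hex hashes of the commits being backed up
totalhash = hashlib.sha1(''.join(sorted(nodes)).encode('ascii')).hexdigest()
print('strip-backup/%s-%s-%s.hg' % (nodes[0][:12], totalhash[:8], 'backup'))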

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])

     collectone(repo.manifestlog._revlog)
     for fname in files:
         collectone(repo.file(fname))

     return s

 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function operates within a transaction of its own, but does
     # not take any lock on the repo.
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()

     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     saveheads = set(saverevs)
     for r in cl.revs(start=striprev + 1):
         if any(p in tostrip for p in cl.parentrevs(r)):
             tostrip.add(r)

         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'

     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)

     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                 compress=False)

     mfst = repo.manifestlog._revlog

     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))

     try:
         with repo.transaction("strip") as tr:
             offset = len(tr.entries)

             tr.startgroup()
             cl.strip(striprev, tr)
             mfst.strip(striprev, tr)
             if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                     # otherwise
                 for unencoded, encoded, size in repo.store.datafiles():
                     if (unencoded.startswith('meta/') and
                         unencoded.endswith('00manifest.i')):
                         dir = unencoded[5:-12]
                         repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()

             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 with repo.svfs(file, 'a', checkambig=True) as fp:
                     fp.truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)

         if tmpbundlefile:
             ui.note(_("adding branch\n"))
             f = vfs.open(tmpbundlefile, "rb")
             gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction('strip') as tr:
                     tr.hookargs = {'source': 'strip',
                                    'url': 'bundle:' + vfs.join(tmpbundlefile)}
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url='bundle:' + vfs.join(tmpbundlefile))
             else:
                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                           True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
         repo._phasecache.invalidate()

         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('repair')
             bm.recordchange(tr)
             tr.close()
         finally:
             tr.release()
             lock.release()

         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))

     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                     % vfs.join(backupfile))
         if tmpbundlefile:
             ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                     % vfs.join(tmpbundlefile))
             ui.warn(_("(fix the problem, then recover the changesets with "
                       "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
         raise
     else:
         if tmpbundlefile:
             # Remove temporary bundle only if there were no exceptions
             vfs.unlink(tmpbundlefile)

     repo.destroyed()
     # return the backup file path (or None if 'backup' was False) so
     # extensions can use it
     return backupfile
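
# Illustrative sketch (toy data, not from repair.py): the head computation the
# loop above performs. parents[r] lists the parents of revision r; revision 2
# is stripped, so the unrelated revisions 3 and 4 must be saved and restored.
parents = {3: [1], 4: [3]}    # revisions above striprev and their parents
tostrip = set([2])
saverevs = set()
saveheads = set()
for r in sorted(parents):
    if any(p in tostrip for p in parents[r]):
        tostrip.add(r)
    if r not in tostrip:
        saverevs.add(r)                        # must survive the strip
        saveheads.difference_update(parents[r])
        saveheads.add(r)
print(sorted(tostrip), sorted(saverevs), sorted(saveheads))
# [2] [3, 4] [4] -> bundle revs 3 and 4 (base 3, head 4) and re-apply them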

 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.

     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()

     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return

     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass

         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()

         repolen = len(repo)
         for rev in repo:
             ui.progress(_('rebuilding'), rev, total=repolen,
                         unit=_('changesets'))

             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)

                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         ui.progress(_('rebuilding'), None)

         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
             for dir in util.dirs(seenfiles):
                 i = 'meta/%s/00manifest.i' % dir
                 d = 'meta/%s/00manifest.d' % dir

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)

         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True

             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))
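
# Illustrative sketch (hypothetical filenames): the store paths derived per
# tracked file above; the '.d' entry is only recorded when revision data
# actually lives outside the '.i' index file.
for f in ('README', 'src/main.c'):
    print('data/%s.i' % f)   # index, checked unconditionally
    print('data/%s.d' % f)   # data, added only if present in the store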

 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark

     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)

 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted

     'indices' is a list of ints which are the indices
     of the markers to be deleted.

     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return

     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)

     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
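
# Illustrative sketch (stdlib analogue of the atomictemp behaviour described
# in the docstring above, not Mercurial's vfs): write the full new content to
# a temporary file, then rename it over the original. Readers see either the
# old file or the complete new one, never a partial write.
import os
import tempfile

def atomicrewrite(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    with os.fdopen(fd, 'wb') as fh:
        fh.write(data)
    os.rename(tmp, path)  # atomic on POSIX filesystems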

 def upgraderequiredsourcerequirements(repo):
     """Obtain requirements required to be present to upgrade a repo.

     An upgrade will not be allowed if the repository doesn't have the
     requirements returned by this function.
     """
     return set([
         # Introduced in Mercurial 0.9.2.
         'revlogv1',
         # Introduced in Mercurial 0.9.2.
         'store',
     ])

 def upgradeblocksourcerequirements(repo):
     """Obtain requirements that will prevent an upgrade from occurring.

     An upgrade cannot be performed if the source repository contains any
     requirement in the returned set.
     """
     return set([
         # The upgrade code does not yet support these experimental features.
         # This is an artificial limitation.
         'manifestv2',
         'treemanifest',
         # This was a precursor to generaldelta and was never enabled by default.
         # It should (hopefully) not exist in the wild.
         'parentdelta',
         # Upgrade should operate on the actual store, not the shared link.
         'shared',
     ])

 def upgradesupportremovedrequirements(repo):
     """Obtain requirements that can be removed during an upgrade.

     If an upgrade were to create a repository that dropped a requirement,
     the dropped requirement must appear in the returned set for the upgrade
     to be allowed.
     """
     return set()

 def upgradesupporteddestrequirements(repo):
     """Obtain requirements that upgrade supports in the destination.

     If the result of the upgrade would create requirements not in this set,
     the upgrade is disallowed.

     Extensions should monkeypatch this to add their custom requirements.
     """
     return set([
         'dotencode',
         'fncache',
         'generaldelta',
         'revlogv1',
         'store',
     ])

 def upgradeallowednewrequirements(repo):
     """Obtain requirements that can be added to a repository during upgrade.

     This is used to disallow proposed requirements from being added when
     they weren't present before.

     We use a list of allowed requirement additions instead of a list of known
     bad additions because the whitelist approach is safer and will prevent
     future, unknown requirements from accidentally being added.
     """
     return set([
         'dotencode',
         'fncache',
         'generaldelta',
     ])
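
# Illustrative sketch (toy requirement sets): how the four helpers above gate
# an upgrade with plain set arithmetic, mirroring the checks in upgraderepo().
current = set(['revlogv1', 'store', 'fncache'])
target = set(['revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'])

missing = set(['revlogv1', 'store']) - current      # required source reqs
blocked = set(['shared', 'parentdelta']) & current  # blocking source reqs
dropped = current - target - set()                  # removals need approval
added = target - current - set(['dotencode', 'fncache', 'generaldelta'])
assert not (missing or blocked or dropped or added)  # upgrade may proceed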

 deficiency = 'deficiency'
 optimisation = 'optimization'

 class upgradeimprovement(object):
     """Represents an improvement that can be made as part of an upgrade.

     The following attributes are defined on each instance:

     name
        Machine-readable string uniquely identifying this improvement. It
        will be mapped to an action later in the upgrade process.

     type
        Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
        problem. An optimization is an action (sometimes optional) that
        can be taken to further improve the state of the repository.

     description
        Message intended for humans explaining the improvement in more detail,
        including the implications of it. For ``deficiency`` types, should be
        worded in the present tense. For ``optimisation`` types, should be
        worded in the future tense.

     upgrademessage
        Message intended for humans explaining what an upgrade addressing this
        issue will do. Should be worded in the future tense.

     fromdefault (``deficiency`` types only)
        Boolean indicating whether the current (deficient) state deviates
        from Mercurial's default configuration.

     fromconfig (``deficiency`` types only)
        Boolean indicating whether the current (deficient) state deviates
        from the current Mercurial configuration.
     """
     def __init__(self, name, type, description, upgrademessage, **kwargs):
         self.name = name
         self.type = type
         self.description = description
         self.upgrademessage = upgrademessage

         for k, v in kwargs.items():
             setattr(self, k, v)
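
# Illustrative sketch: constructing an instance of the class above. Extra
# keyword arguments such as fromdefault/fromconfig simply become attributes.
i = upgradeimprovement(
    name='fncache',
    type=deficiency,
    description='long and reserved filenames may not work correctly',
    upgrademessage='repository will be more resilient to storing certain paths',
    fromdefault=True,
    fromconfig=True)
assert i.name == 'fncache' and i.fromdefault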

 def upgradefindimprovements(repo):
     """Determine improvements that can be made to the repo during upgrade.

     Returns a list of ``upgradeimprovement`` describing repository deficiencies
     and optimizations.
     """
     # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
     from . import localrepo

     newreporeqs = localrepo.newreporequirements(repo)

     improvements = []

     # We could detect lack of revlogv1 and store here, but they were added
     # in 0.9.2 and we don't support upgrading repos without these
     # requirements, so let's not bother.

     if 'fncache' not in repo.requirements:
         improvements.append(upgradeimprovement(
             name='fncache',
             type=deficiency,
             description=_('long and reserved filenames may not work correctly; '
                           'repository performance is sub-optimal'),
             upgrademessage=_('repository will be more resilient to storing '
                              'certain paths and performance of certain '
                              'operations should be improved'),
             fromdefault=True,
             fromconfig='fncache' in newreporeqs))

     if 'dotencode' not in repo.requirements:
         improvements.append(upgradeimprovement(
             name='dotencode',
             type=deficiency,
             description=_('storage of filenames beginning with a period or '
                           'space may not work correctly'),
             upgrademessage=_('repository will be better able to store files '
                              'beginning with a space or period'),
             fromdefault=True,
             fromconfig='dotencode' in newreporeqs))

     if 'generaldelta' not in repo.requirements:
         improvements.append(upgradeimprovement(
             name='generaldelta',
             type=deficiency,
             description=_('deltas within internal storage are unable to '
                           'choose optimal revisions; repository is larger and '
                           'slower than it could be; interaction with other '
                           'repositories may require extra network and CPU '
                           'resources, making "hg push" and "hg pull" slower'),
             upgrademessage=_('repository storage will be able to create '
                              'optimal deltas; new repository data will be '
                              'smaller and read times should decrease; '
                              'interacting with other repositories using this '
                              'storage model should require less network and '
                              'CPU resources, making "hg push" and "hg pull" '
                              'faster'),
             fromdefault=True,
             fromconfig='generaldelta' in newreporeqs))

     # Mercurial 4.0 changed changelogs to not use delta chains. Search for
     # changelogs with deltas.
     cl = repo.changelog
     for rev in cl:
         chainbase = cl.chainbase(rev)
         if chainbase != rev:
             improvements.append(upgradeimprovement(
                 name='removecldeltachain',
                 type=deficiency,
                 description=_('changelog storage is using deltas instead of '
                               'raw entries; changelog reading and any '
                               'operation relying on changelog data are slower '
                               'than they could be'),
                 upgrademessage=_('changelog storage will be reformatted to '
                                  'store raw entries; changelog reading will be '
                                  'faster; changelog size may be reduced'),
                 fromdefault=True,
                 fromconfig=True))
             break

     # Now for the optimizations.

     # These are unconditionally added. There is logic later that figures out
     # which ones to apply.

     improvements.append(upgradeimprovement(
         name='redeltaparent',
         type=optimisation,
         description=_('deltas within internal storage will be recalculated to '
                       'choose an optimal base revision where this was not '
                       'already done; the size of the repository may shrink and '
                       'various operations may become faster; the first time '
                       'this optimization is performed could slow down upgrade '
                       'execution considerably; subsequent invocations should '
                       'not run noticeably slower'),
         upgrademessage=_('deltas within internal storage will choose a new '
                          'base revision if needed')))

     improvements.append(upgradeimprovement(
         name='redeltamultibase',
         type=optimisation,
         description=_('deltas within internal storage will be recalculated '
                       'against multiple base revisions and the smallest '
                       'difference will be used; the size of the repository may '
                       'shrink significantly when there are many merges; this '
                       'optimization will slow down execution in proportion to '
                       'the number of merges in the repository and the number '
                       'of files in the repository; this slowdown should not '
                       'be significant unless there are tens of thousands of '
                       'files and thousands of merges'),
         upgrademessage=_('deltas within internal storage will choose an '
                          'optimal delta by computing deltas against multiple '
                          'parents; may slow down execution time '
                          'significantly')))

     improvements.append(upgradeimprovement(
         name='redeltaall',
         type=optimisation,
         description=_('deltas within internal storage will always be '
                       'recalculated without reusing prior deltas; this will '
                       'likely make execution run several times slower; this '
                       'optimization is typically not needed'),
         upgrademessage=_('deltas within internal storage will be fully '
                          'recomputed; this will likely drastically slow down '
                          'execution time')))

     return improvements
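
# Illustrative sketch (toy data): the delta-chain scan above flags a repo as
# soon as any changelog revision is stored as a delta, i.e. its chain base is
# an earlier revision instead of itself.
chainbases = [0, 1, 1, 3]  # rev 2 delta-chains back to rev 1
assert any(base != rev for rev, base in enumerate(chainbases))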

 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                             optimize):
     """Determine upgrade actions that will be performed.

     Given a list of improvements as returned by ``upgradefindimprovements``,
     determine the list of upgrade actions that will be performed.

     The role of this function is to filter improvements if needed, apply
     recommended optimizations from the improvements list that make sense,
     etc.

     Returns a list of action names.
     """
     newactions = []

     knownreqs = upgradesupporteddestrequirements(repo)

     for i in improvements:
         name = i.name

         # If the action is a requirement that doesn't show up in the
         # destination requirements, prune the action.
         if name in knownreqs and name not in destreqs:
             continue

         if i.type == deficiency:
             newactions.append(name)

     newactions.extend(o for o in sorted(optimize) if o not in newactions)

     # FUTURE consider adding some optimizations here for certain transitions.
     # e.g. adding generaldelta could schedule parent redeltas.

     return newactions
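
# Illustrative sketch (toy inputs): deficiencies become actions by default and
# requested optimizations are appended afterwards. repo may be None here only
# because upgradesupporteddestrequirements() ignores its argument.
imps = [upgradeimprovement(name='generaldelta', type=deficiency,
                           description='', upgrademessage=''),
        upgradeimprovement(name='redeltaall', type=optimisation,
                           description='', upgrademessage='')]
acts = upgradedetermineactions(None, imps, set(['revlogv1', 'store']),
                               set(['revlogv1', 'store', 'generaldelta']),
                               set(['redeltaall']))
assert acts == ['generaldelta', 'redeltaall']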

+def _revlogfrompath(repo, path):
+    """Obtain a revlog from a repo path.
+
+    An instance of the appropriate class is returned.
+    """
+    if path == '00changelog.i':
+        return changelog.changelog(repo.svfs)
+    elif path.endswith('00manifest.i'):
+        mandir = path[:-len('00manifest.i')]
+        return manifest.manifestrevlog(repo.svfs, dir=mandir)
+    else:
+        # Filelogs don't do anything special with settings. So we can use a
+        # vanilla revlog.
+        return revlog.revlog(repo.svfs, path)
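
# Illustrative sketch: the path -> revlog class mapping implemented above.
#   '00changelog.i'         -> changelog.changelog
#   '00manifest.i'          -> manifest.manifestrevlog (root manifest, dir='')
#   'meta/foo/00manifest.i' -> manifest.manifestrevlog (tree manifest of foo/)
#   'data/src/main.c.i'     -> revlog.revlog (a plain filelog)
assert 'meta/foo/00manifest.i'[:-len('00manifest.i')] == 'meta/foo/'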
+
+def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
+    """Copy revlogs between 2 repos."""
+    revcount = 0
+    srcsize = 0
+    srcrawsize = 0
+    dstsize = 0
+    fcount = 0
+    frevcount = 0
+    fsrcsize = 0
+    frawsize = 0
+    fdstsize = 0
+    mcount = 0
+    mrevcount = 0
+    msrcsize = 0
+    mrawsize = 0
+    mdstsize = 0
+    crevcount = 0
+    csrcsize = 0
+    crawsize = 0
+    cdstsize = 0
+
+    # Perform a pass to collect metadata. This validates we can open all
+    # source files and allows a unified progress bar to be displayed.
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        rl = _revlogfrompath(srcrepo, unencoded)
+        revcount += len(rl)
+
+        datasize = 0
+        rawsize = 0
+        idx = rl.index
+        for rev in rl:
+            e = idx[rev]
+            datasize += e[1]
+            rawsize += e[2]
+
+        srcsize += datasize
+        srcrawsize += rawsize
+
+        # This is for the separate progress bars.
+        if isinstance(rl, changelog.changelog):
+            crevcount += len(rl)
+            csrcsize += datasize
+            crawsize += rawsize
+        elif isinstance(rl, manifest.manifestrevlog):
+            mcount += 1
+            mrevcount += len(rl)
+            msrcsize += datasize
+            mrawsize += rawsize
+        elif isinstance(rl, revlog.revlog):
+            fcount += 1
+            frevcount += len(rl)
+            fsrcsize += datasize
+            frawsize += rawsize
+
+    if not revcount:
+        return
+
+    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
+               '%d in changelog)\n') %
+             (revcount, frevcount, mrevcount, crevcount))
+    ui.write(_('migrating %s in store; %s tracked data\n') % (
+             (util.bytecount(srcsize), util.bytecount(srcrawsize))))
+
+    # Used to keep track of progress.
+    progress = []
+    def oncopiedrevision(rl, rev, node):
+        progress[1] += 1
+        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
+
+    # Do the actual copying.
+    # FUTURE this operation can be farmed off to worker processes.
+    seen = set()
+    for unencoded, encoded, size in srcrepo.store.walk():
+        if unencoded.endswith('.d'):
+            continue
+
+        oldrl = _revlogfrompath(srcrepo, unencoded)
+        newrl = _revlogfrompath(dstrepo, unencoded)
+
+        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
+            ui.write(_('finished migrating %d manifest revisions across %d '
+                       'manifests; change in size: %s\n') %
+                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
+
+            ui.write(_('migrating changelog containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (crevcount, util.bytecount(csrcsize),
+                      util.bytecount(crawsize)))
+            seen.add('c')
+            progress[:] = [_('changelog revisions'), 0, crevcount]
+        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
+            ui.write(_('finished migrating %d filelog revisions across %d '
+                       'filelogs; change in size: %s\n') %
+                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
+
+            ui.write(_('migrating %d manifests containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (mcount, mrevcount, util.bytecount(msrcsize),
+                      util.bytecount(mrawsize)))
+            seen.add('m')
+            progress[:] = [_('manifest revisions'), 0, mrevcount]
+        elif 'f' not in seen:
+            ui.write(_('migrating %d filelogs containing %d revisions '
+                       '(%s in store; %s tracked data)\n') %
+                     (fcount, frevcount, util.bytecount(fsrcsize),
+                      util.bytecount(frawsize)))
+            seen.add('f')
+            progress[:] = [_('file revisions'), 0, frevcount]
+
+        ui.progress(progress[0], progress[1], total=progress[2])
+
+        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
+        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
+                    deltareuse=deltareuse,
+                    aggressivemergedeltas=aggressivemergedeltas)
+
+        datasize = 0
+        idx = newrl.index
+        for rev in newrl:
+            datasize += idx[rev][1]
+
+        dstsize += datasize
+
+        if isinstance(newrl, changelog.changelog):
+            cdstsize += datasize
+        elif isinstance(newrl, manifest.manifestrevlog):
+            mdstsize += datasize
+        else:
+            fdstsize += datasize
+
+    ui.progress(progress[0], None)
+
+    ui.write(_('finished migrating %d changelog revisions; change in size: '
+               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
+
+    ui.write(_('finished migrating %d total revisions; total change in store '
+               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
+
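
# Illustrative note (revlog index layout, stated as an assumption): each index
# entry is a tuple whose slot 1 holds the compressed size stored on disk and
# slot 2 the uncompressed size of the revision, which is why the sizing passes
# above sum e[1] into the store size and e[2] into the tracked-data size.
entries = [(0, 120, 300), (0, 80, 310)]   # (flags/offset, compressed, raw)
assert sum(e[1] for e in entries) == 200  # bytes in store
assert sum(e[2] for e in entries) == 610  # bytes of tracked data
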
 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
     """Do the low-level work of upgrading a repository.

     The upgrade is effectively performed as a copy between a source
     repository and a temporary destination repository.

     The source repository is unmodified for as long as possible so the
     upgrade can abort at any time without causing loss of service for
     readers and without corrupting the source repository.
     """
     assert srcrepo.currentwlock()
     assert dstrepo.currentwlock()

-    # TODO copy store
+    ui.write(_('(it is safe to interrupt this process any time before '
+               'data migration completes)\n'))
+
+    if 'redeltaall' in actions:
+        deltareuse = revlog.revlog.DELTAREUSENEVER
+    elif 'redeltaparent' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    elif 'redeltamultibase' in actions:
+        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
+    else:
+        deltareuse = revlog.revlog.DELTAREUSEALWAYS
+
+    with dstrepo.transaction('upgrade') as tr:
+        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
+                     'redeltamultibase' in actions)
+
+    # TODO copy non-revlog store files
+
+    ui.write(_('data fully migrated to temporary repository\n'))

     backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
     backupvfs = scmutil.vfs(backuppath)

     # Make a backup of requires file first, as it is the first to be modified.
     util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))

     # We install an arbitrary requirement that clients must not support
     # as a mechanism to lock out new clients during the data swap. This is
     # better than allowing a client to continue while the repository is in
     # an inconsistent state.
     ui.write(_('marking source repository as being upgraded; clients will be '
                'unable to read from repository\n'))
     scmutil.writerequires(srcrepo.vfs,
                           srcrepo.requirements | set(['upgradeinprogress']))

     ui.write(_('starting in-place swap of repository data\n'))
     ui.write(_('replaced files will be backed up at %s\n') %
              backuppath)

-    # TODO do the store swap here.
+    # Now swap in the new store directory. Doing it as a rename should make
+    # the operation nearly instantaneous and atomic (at least in well-behaved
+    # environments).
+    ui.write(_('replacing store...\n'))
+    tstart = time.time()
+    util.rename(srcrepo.spath, backupvfs.join('store'))
+    util.rename(dstrepo.spath, srcrepo.spath)
+    elapsed = time.time() - tstart
+    ui.write(_('store replacement complete; repository was inconsistent for '
+               '%0.1fs\n') % elapsed)

     # We first write the requirements file. Any new requirements will lock
     # out legacy clients.
     ui.write(_('finalizing requirements file and making repository readable '
                'again\n'))
     scmutil.writerequires(srcrepo.vfs, requirements)

     return backuppath
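
# Illustrative sketch (stdlib analogue, hypothetical paths): the two renames
# above swap an entire store directory in one near-atomic step; a directory
# rename moves the whole tree without copying any file data.
#
#   os.rename('repo/.hg/store', 'backup/store')       # move old store aside
#   os.rename('tmprepo/.hg/store', 'repo/.hg/store')  # drop new store in place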

 def upgraderepo(ui, repo, run=False, optimize=None):
     """Upgrade a repository in place."""
     # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
     from . import localrepo

     optimize = set(optimize or [])
     repo = repo.unfiltered()

     # Ensure the repository can be upgraded.
     missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
     if missingreqs:
         raise error.Abort(_('cannot upgrade repository; requirement '
                             'missing: %s') % _(', ').join(sorted(missingreqs)))

     blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
     if blockedreqs:
         raise error.Abort(_('cannot upgrade repository; unsupported source '
                             'requirement: %s') %
                           _(', ').join(sorted(blockedreqs)))

     # FUTURE there is potentially a need to control the wanted requirements via
     # command arguments or via an extension hook point.
     newreqs = localrepo.newreporequirements(repo)

     noremovereqs = (repo.requirements - newreqs -
                     upgradesupportremovedrequirements(repo))
     if noremovereqs:
         raise error.Abort(_('cannot upgrade repository; requirement would be '
                             'removed: %s') % _(', ').join(sorted(noremovereqs)))

     noaddreqs = (newreqs - repo.requirements -
                  upgradeallowednewrequirements(repo))
     if noaddreqs:
         raise error.Abort(_('cannot upgrade repository; do not support adding '
                             'requirement: %s') %
                           _(', ').join(sorted(noaddreqs)))

     unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
     if unsupportedreqs:
         raise error.Abort(_('cannot upgrade repository; do not support '
                             'destination requirement: %s') %
                           _(', ').join(sorted(unsupportedreqs)))

     # Find and validate all improvements that can be made.
     improvements = upgradefindimprovements(repo)
     for i in improvements:
         if i.type not in (deficiency, optimisation):
             raise error.Abort(_('unexpected improvement type %s for %s') % (
                 i.type, i.name))

     # Validate arguments.
     unknownoptimize = optimize - set(i.name for i in improvements
                                      if i.type == optimisation)
     if unknownoptimize:
         raise error.Abort(_('unknown optimization action requested: %s') %
                           ', '.join(sorted(unknownoptimize)),
                           hint=_('run without arguments to see valid '
                                  'optimizations'))

     actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                       newreqs, optimize)

     def printrequirements():
         ui.write(_('requirements\n'))
         ui.write(_('   preserved: %s\n') %
                  _(', ').join(sorted(newreqs & repo.requirements)))

         if repo.requirements - newreqs:
             ui.write(_('   removed: %s\n') %
                      _(', ').join(sorted(repo.requirements - newreqs)))

         if newreqs - repo.requirements:
             ui.write(_('   added: %s\n') %
                      _(', ').join(sorted(newreqs - repo.requirements)))

         ui.write('\n')

     def printupgradeactions():
         for action in actions:
             for i in improvements:
                 if i.name == action:
                     ui.write('%s\n   %s\n\n' %
                              (i.name, i.upgrademessage))

     if not run:
         fromdefault = []
         fromconfig = []
         optimizations = []

         for i in improvements:
             assert i.type in (deficiency, optimisation)
             if i.type == deficiency:
                 if i.fromdefault:
                     fromdefault.append(i)
                 if i.fromconfig:
                     fromconfig.append(i)
             else:
                 optimizations.append(i)

         if fromdefault or fromconfig:
             fromconfignames = set(x.name for x in fromconfig)
             onlydefault = [i for i in fromdefault
                            if i.name not in fromconfignames]

             if fromconfig:
                 ui.write(_('repository lacks features recommended by '
                            'current config options:\n\n'))
                 for i in fromconfig:
                     ui.write('%s\n   %s\n\n' % (i.name, i.description))

             if onlydefault:
                 ui.write(_('repository lacks features used by the default '
                            'config options:\n\n'))
                 for i in onlydefault:
                     ui.write('%s\n   %s\n\n' % (i.name, i.description))

             ui.write('\n')
         else:
             ui.write(_('(no feature deficiencies found in existing '
                        'repository)\n'))

         ui.write(_('performing an upgrade with "--run" will make the following
994 ui.write(_('performing an upgrade with "--run" will make the following '
808 'changes:\n\n'))
995 'changes:\n\n'))
809
996
810 printrequirements()
997 printrequirements()
811 printupgradeactions()
998 printupgradeactions()
812
999
813 unusedoptimize = [i for i in improvements
1000 unusedoptimize = [i for i in improvements
814 if i.name not in actions and i.type == optimisation]
1001 if i.name not in actions and i.type == optimisation]
815 if unusedoptimize:
1002 if unusedoptimize:
816 ui.write(_('additional optimizations are available by specifying '
1003 ui.write(_('additional optimizations are available by specifying '
817 '"--optimize <name>":\n\n'))
1004 '"--optimize <name>":\n\n'))
818 for i in unusedoptimize:
1005 for i in unusedoptimize:
819 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1006 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
820 return
1007 return
821
1008
822 # Else we're in the run=true case.
1009 # Else we're in the run=true case.
823 ui.write(_('upgrade will perform the following actions:\n\n'))
1010 ui.write(_('upgrade will perform the following actions:\n\n'))
824 printrequirements()
1011 printrequirements()
825 printupgradeactions()
1012 printupgradeactions()
826
1013
827 ui.write(_('beginning upgrade...\n'))
1014 ui.write(_('beginning upgrade...\n'))
828 with repo.wlock():
1015 with repo.wlock():
829 with repo.lock():
1016 with repo.lock():
830 ui.write(_('repository locked and read-only\n'))
1017 ui.write(_('repository locked and read-only\n'))
831 # Our strategy for upgrading the repository is to create a new,
1018 # Our strategy for upgrading the repository is to create a new,
832 # temporary repository, write data to it, then do a swap of the
1019 # temporary repository, write data to it, then do a swap of the
833 # data. There are less heavyweight ways to do this, but it is easier
1020 # data. There are less heavyweight ways to do this, but it is easier
834 # to create a new repo object than to instantiate all the components
1021 # to create a new repo object than to instantiate all the components
835 # (like the store) separately.
1022 # (like the store) separately.
836 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1023 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
837 backuppath = None
1024 backuppath = None
838 try:
1025 try:
839 ui.write(_('creating temporary repository to stage migrated '
1026 ui.write(_('creating temporary repository to stage migrated '
840 'data: %s\n') % tmppath)
1027 'data: %s\n') % tmppath)
841 dstrepo = localrepo.localrepository(repo.baseui,
1028 dstrepo = localrepo.localrepository(repo.baseui,
842 path=tmppath,
1029 path=tmppath,
843 create=True)
1030 create=True)
844
1031
845 with dstrepo.wlock():
1032 with dstrepo.wlock():
846 with dstrepo.lock():
1033 with dstrepo.lock():
847 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1034 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
848 actions)
1035 actions)
849
1036
850 finally:
1037 finally:
851 ui.write(_('removing temporary repository %s\n') % tmppath)
1038 ui.write(_('removing temporary repository %s\n') % tmppath)
852 repo.vfs.rmtree(tmppath, forcibly=True)
1039 repo.vfs.rmtree(tmppath, forcibly=True)
853
1040
854 if backuppath:
1041 if backuppath:
855 ui.warn(_('copy of old repository backed up at %s\n') %
1042 ui.warn(_('copy of old repository backed up at %s\n') %
856 backuppath)
1043 backuppath)
857 ui.warn(_('the old repository will not be deleted; remove '
1044 ui.warn(_('the old repository will not be deleted; remove '
858 'it to free up disk space once the upgraded '
1045 'it to free up disk space once the upgraded '
859 'repository is verified\n'))
1046 'repository is verified\n'))
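The requirement checks above are plain set arithmetic over requirement names. Below is a minimal, self-contained sketch of that pattern; the concrete set values are hypothetical examples, not what Mercurial computes (the real inputs come from repo.requirements, localrepo.newreporequirements() and the upgrade*requirements() helpers):

# Sketch only: hypothetical requirement sets standing in for the real ones.
current = {'revlogv1', 'store', 'fncache'}
new = {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
supportedremovals = set()          # nothing may be silently dropped by default
allowedadditions = {'dotencode', 'generaldelta'}

# A requirement that would disappear and is not a supported removal is fatal.
noremove = current - new - supportedremovals
# A requirement that would appear and is not an allowed addition is fatal.
noadd = new - current - allowedadditions
assert not noremove and not noadd

print('preserved: %s' % ', '.join(sorted(new & current)))
print('added: %s' % ', '.join(sorted(new - current)))
# preserved: fncache, revlogv1, store
# added: dotencode, generaldelta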
@@ -1,255 +1,311 @@
   $ cat >> $HGRCPATH << EOF
   > [extensions]
   > share =
   > EOF

 store and revlogv1 are required in source

   $ hg --config format.usestore=false init no-store
   $ hg -R no-store debugupgraderepo
   abort: cannot upgrade repository; requirement missing: store
   [255]

   $ hg init no-revlogv1
   $ cat > no-revlogv1/.hg/requires << EOF
   > dotencode
   > fncache
   > generaldelta
   > store
   > EOF

   $ hg -R no-revlogv1 debugupgraderepo
   abort: cannot upgrade repository; requirement missing: revlogv1
   [255]

 Cannot upgrade shared repositories

   $ hg init share-parent
   $ hg -q share share-parent share-child

   $ hg -R share-child debugupgraderepo
   abort: cannot upgrade repository; unsupported source requirement: shared
   [255]

 Do not yet support upgrading manifestv2 and treemanifest repos

   $ hg --config experimental.manifestv2=true init manifestv2
   $ hg -R manifestv2 debugupgraderepo
   abort: cannot upgrade repository; unsupported source requirement: manifestv2
   [255]

   $ hg --config experimental.treemanifest=true init treemanifest
   $ hg -R treemanifest debugupgraderepo
   abort: cannot upgrade repository; unsupported source requirement: treemanifest
   [255]

 Cannot add manifestv2 or treemanifest requirement during upgrade

   $ hg init disallowaddedreq
   $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
   abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
   [255]

 An upgrade of a repository created with recommended settings only suggests optimizations

   $ hg init empty
   $ cd empty
   $ hg debugupgraderepo
   (no feature deficiencies found in existing repository)
   performing an upgrade with "--run" will make the following changes:

   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, store

   additional optimizations are available by specifying "--optimize <name>":

   redeltaparent
      deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

   redeltamultibase
      deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

   redeltaall
      deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


 --optimize can be used to add optimizations

   $ hg debugupgrade --optimize redeltaparent
   (no feature deficiencies found in existing repository)
   performing an upgrade with "--run" will make the following changes:

   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, store

   redeltaparent
      deltas within internal storage will choose a new base revision if needed

   additional optimizations are available by specifying "--optimize <name>":

   redeltamultibase
      deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

   redeltaall
      deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


 Various sub-optimal detections work

   $ cat > .hg/requires << EOF
   > revlogv1
   > store
   > EOF

   $ hg debugupgraderepo
   repository lacks features recommended by current config options:

   fncache
      long and reserved filenames may not work correctly; repository performance is sub-optimal

   dotencode
      storage of filenames beginning with a period or space may not work correctly

   generaldelta
      deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower


   performing an upgrade with "--run" will make the following changes:

   requirements
      preserved: revlogv1, store
      added: dotencode, fncache, generaldelta

   fncache
      repository will be more resilient to storing certain paths and performance of certain operations should be improved

   dotencode
      repository will be better able to store files beginning with a space or period

   generaldelta
      repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

   additional optimizations are available by specifying "--optimize <name>":

   redeltaparent
      deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

   redeltamultibase
      deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

   redeltaall
      deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


   $ hg --config format.dotencode=false debugupgraderepo
   repository lacks features recommended by current config options:

   fncache
      long and reserved filenames may not work correctly; repository performance is sub-optimal

   generaldelta
      deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower

   repository lacks features used by the default config options:

   dotencode
      storage of filenames beginning with a period or space may not work correctly


   performing an upgrade with "--run" will make the following changes:

   requirements
      preserved: revlogv1, store
      added: fncache, generaldelta

   fncache
      repository will be more resilient to storing certain paths and performance of certain operations should be improved

   generaldelta
      repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

   additional optimizations are available by specifying "--optimize <name>":

   redeltaparent
      deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

   redeltamultibase
      deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

   redeltaall
      deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


   $ cd ..

 Upgrading a repository that is already modern essentially no-ops

   $ hg init modern
   $ hg -R modern debugupgraderepo --run
   upgrade will perform the following actions:

   requirements
      preserved: dotencode, fncache, generaldelta, revlogv1, store

   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  data fully migrated to temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
   replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
   copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
   the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

 Upgrading a repository to generaldelta works

   $ hg --config format.usegeneraldelta=false init upgradegd
   $ cd upgradegd
   $ touch f0
   $ hg -q commit -A -m initial
   $ touch f1
   $ hg -q commit -A -m 'add f1'
   $ hg -q up -r 0
   $ touch f2
   $ hg -q commit -A -m 'add f2'

   $ hg debugupgraderepo --run
   upgrade will perform the following actions:

   requirements
      preserved: dotencode, fncache, revlogv1, store
      added: generaldelta

   generaldelta
      repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

   beginning upgrade...
   repository locked and read-only
   creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+  (it is safe to interrupt this process any time before data migration completes)
+  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+  migrating 341 bytes in store; 401 bytes tracked data
+  migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
+  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+  migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
+  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+  migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
+  finished migrating 3 changelog revisions; change in size: 0 bytes
+  finished migrating 9 total revisions; total change in store size: 0 bytes
+  data fully migrated to temporary repository
   marking source repository as being upgraded; clients will be unable to read from repository
   starting in-place swap of repository data
   replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+  replacing store...
+  store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
   copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
   the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

 Original requirements backed up

   $ cat .hg/upgradebackup.*/requires
   dotencode
   fncache
   revlogv1
   store

 generaldelta added to original requirements files

   $ cat .hg/requires
   dotencode
   fncache
   generaldelta
   revlogv1
   store

+store directory has files we expect
+
+  $ ls .hg/store
+  00changelog.i
+  00manifest.i
+  data
+  fncache
+  undo
+  undo.backupfiles
+  undo.phaseroots
+
+manifest should be generaldelta
+
+  $ hg debugrevlog -m | grep flags
+  flags : inline, generaldelta
+
+verify should be happy
+
+  $ hg verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  3 files, 3 changesets, 3 total revisions
+
+old store should be backed up
+
+  $ ls .hg/upgradebackup.*/store
+  00changelog.i
+  00manifest.i
+  data
+  fncache
+  lock
+  phaseroots
+  undo
+  undo.backup.fncache
+  undo.backupfiles
+  undo.phaseroots
+
   $ cd ..
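The transcript above exercises the stage-then-swap strategy from repair.py: data is migrated into a temporary repository, the old store is moved aside as a backup, and the staged store is moved into place, so the repository is inconsistent only for the brief window between the two moves. A minimal sketch of that pattern in isolation; the function name and path handling here are hypothetical illustrations, not Mercurial's API:

import os
import tempfile

def swap_in_store(repopath):
    # Stage migrated data in a sibling temporary directory so the final
    # swap is two renames rather than a long copy.
    tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repopath)
    stagedstore = os.path.join(tmppath, 'store')
    os.mkdir(stagedstore)
    # ... migrate revlogs into stagedstore here ...

    # Move the old store aside and keep it, so the user can verify the
    # upgraded repository before deleting the backup by hand.
    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=repopath)
    os.rename(os.path.join(repopath, 'store'),
              os.path.join(backuppath, 'store'))
    # The repository is inconsistent only between these two renames.
    os.rename(stagedstore, os.path.join(repopath, 'store'))
    return backuppath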