repair: use rawvfs when copying extra store files...

Gregory Szorc
r31799:8110d49e stable

@@ -1,1103 +1,1103 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import stat
import tempfile
import time

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    changelog,
    error,
    exchange,
    manifest,
    obsolete,
    revlog,
    scmutil,
    util,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

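    # Changegroup version '01' can only travel in a legacy HG10 bundle,
    # where compression is baked into the bundle type; newer changegroup
    # versions need a bundle2 (HG20) container with an explicit
    # compression engine.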
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
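    # One pass over all revisions above striprev: a revision with a parent
    # in tostrip is stripped as well; everything else must be saved. A
    # saved revision stays in saveheads only until one of its own
    # descendants is seen, which is why each saved revision evicts its
    # parents from the set.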
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

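            # The strip() calls above only record the new end offsets in
            # the transaction entries; the actual on-disk truncation of the
            # revlog files happens here.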
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

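        # Bookmarks that pointed into the stripped set are moved to the
        # highest surviving parent of that set (newbmtarget, computed
        # above), falling back to the working directory parent.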
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass
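        # (the containment check above is done purely for its side effect:
        # the fncache is loaded lazily on first access)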

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
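    # Strip the ancestors of the bookmark, minus anything reachable from a
    # head that does not carry the bookmark and minus the ancestors of all
    # other bookmarks: only history exclusive to this bookmark is removed.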
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n

def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slowdown should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements

def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = upgradesupporteddestrequirements(repo)

    for i in improvements:
        name = i.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        if i.type == deficiency:
            newactions.append(name)

    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    else:
        # Filelogs don't do anything special with settings. So we can use a
        # vanilla revlog.
        return revlog.revlog(repo.svfs, path)

def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = []
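    # (a mutable list rather than separate variables so the nested callback
    # below can update the shared topic/position/total in place without
    # rebinding any names)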
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in ('lock', 'fncache'):
        return False

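    # Everything else in the store (for example phaseroots or the obsstore)
    # is a candidate for a verbatim copy.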
    return True

def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

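    # Map the requested actions to a revlog delta reuse policy: full
    # recompute for 'redeltaall', reuse only deltas between the same
    # revisions for the targeted re-delta actions, and reuse everything
    # when no re-delta was requested. 'redeltamultibase' additionally
    # enables aggressivemergedeltas in the _copyrevlogs() call below.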
857 if 'redeltaall' in actions:
857 if 'redeltaall' in actions:
858 deltareuse = revlog.revlog.DELTAREUSENEVER
858 deltareuse = revlog.revlog.DELTAREUSENEVER
859 elif 'redeltaparent' in actions:
859 elif 'redeltaparent' in actions:
860 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
860 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
861 elif 'redeltamultibase' in actions:
861 elif 'redeltamultibase' in actions:
862 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
862 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
863 else:
863 else:
864 deltareuse = revlog.revlog.DELTAREUSEALWAYS
864 deltareuse = revlog.revlog.DELTAREUSEALWAYS
865
865
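    # Illustrative summary of the mapping above: 'redeltaall' recomputes
    # every delta from scratch, 'redeltaparent' and 'redeltamultibase' only
    # reuse deltas between unchanged revisions, and the default reuses every
    # existing delta.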
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir('', stat=True)):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
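            # Join through rawvfs: readdir() above already returned the
            # on-disk names, so joining through the filename-encoding vfs
            # would encode them again and point at paths that do not exist.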
-            src = srcrepo.store.vfs.join(p)
-            dst = dstrepo.store.vfs.join(p)
+            src = srcrepo.store.rawvfs.join(p)
+            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

    _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = scmutil.vfs(backuppath)

    # Make a backup of the requires file first, as it is the first to be
    # modified.
    util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))
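    # From this point until the final writerequires() below, any client that
    # tries to open the repository aborts, because 'upgradeinprogress' is a
    # requirement it does not know (deliberately so).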

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = time.time()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = time.time() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

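# A recovery sketch (an assumption, not a documented procedure): if something
# goes wrong after the swap, the pre-upgrade state can be restored by copying
# 'requires' back from the backup directory returned above and renaming the
# backed-up 'store' directory into place.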
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

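    # Taken together, the guards above require that the source has every
    # mandatory requirement, carries no blocking requirement, and that the
    # upgrade neither drops nor adds a requirement outside the supported sets.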
    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
@@ -1,346 +1,354 @@
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > share =
  > EOF

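(The share extension is enabled here so the shared-repository refusal tested
below has something to exercise.)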
store and revlogv1 are required in source

  $ hg --config format.usestore=false init no-store
  $ hg -R no-store debugupgraderepo
  abort: cannot upgrade repository; requirement missing: store
  [255]

  $ hg init no-revlogv1
  $ cat > no-revlogv1/.hg/requires << EOF
  > dotencode
  > fncache
  > generaldelta
  > store
  > EOF

  $ hg -R no-revlogv1 debugupgraderepo
  abort: cannot upgrade repository; requirement missing: revlogv1
  [255]

Cannot upgrade shared repositories

  $ hg init share-parent
  $ hg -q share share-parent share-child

  $ hg -R share-child debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

Do not yet support upgrading manifestv2 and treemanifest repos

  $ hg --config experimental.manifestv2=true init manifestv2
  $ hg -R manifestv2 debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: manifestv2
  [255]

  $ hg --config experimental.treemanifest=true init treemanifest
  $ hg -R treemanifest debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: treemanifest
  [255]

Cannot add manifestv2 or treemanifest requirement during upgrade

  $ hg init disallowaddedreq
  $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
  abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
  [255]

An upgrade of a repository created with recommended settings only suggests optimizations

  $ hg init empty
  $ cd empty
  $ hg debugupgraderepo
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, store

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


--optimize can be used to add optimizations

  $ hg debugupgrade --optimize redeltaparent
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, store

  redeltaparent
     deltas within internal storage will choose a new base revision if needed

  additional optimizations are available by specifying "--optimize <name>":

  redeltamultibase
     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


Various sub-optimal detections work

  $ cat > .hg/requires << EOF
  > revlogv1
  > store
  > EOF

  $ hg debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
     long and reserved filenames may not work correctly; repository performance is sub-optimal

  dotencode
     storage of filenames beginning with a period or space may not work correctly

  generaldelta
     deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower


  performing an upgrade with "--run" will make the following changes:

  requirements
     preserved: revlogv1, store
     added: dotencode, fncache, generaldelta

  fncache
     repository will be more resilient to storing certain paths and performance of certain operations should be improved

  dotencode
     repository will be better able to store files beginning with a space or period

  generaldelta
     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


  $ hg --config format.dotencode=false debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
     long and reserved filenames may not work correctly; repository performance is sub-optimal

  generaldelta
     deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower

  repository lacks features used by the default config options:

  dotencode
     storage of filenames beginning with a period or space may not work correctly


  performing an upgrade with "--run" will make the following changes:

  requirements
     preserved: revlogv1, store
     added: fncache, generaldelta

  fncache
     repository will be more resilient to storing certain paths and performance of certain operations should be improved

  generaldelta
     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
     deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
     deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
     deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


  $ cd ..

Upgrading a repository that is already modern essentially no-ops

  $ hg init modern
  $ hg -R modern debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, store

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/modern/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/modern/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/modern/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

Upgrading a repository to generaldelta works

  $ hg --config format.usegeneraldelta=false init upgradegd
  $ cd upgradegd
  $ touch f0
  $ hg -q commit -A -m initial
  $ touch f1
  $ hg -q commit -A -m 'add f1'
  $ hg -q up -r 0
  $ touch f2
  $ hg -q commit -A -m 'add f2'

  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, revlogv1, store
     added: generaldelta

  generaldelta
     repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
  migrating 341 bytes in store; 401 bytes tracked data
  migrating 3 filelogs containing 3 revisions (0 bytes in store; 0 bytes tracked data)
  finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 3 revisions (157 bytes in store; 220 bytes tracked data)
  finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 3 revisions (184 bytes in store; 181 bytes tracked data)
  finished migrating 3 changelog revisions; change in size: 0 bytes
  finished migrating 9 total revisions; total change in store size: 0 bytes
  copying phaseroots
  data fully migrated to temporary repository
  marking source repository as being upgraded; clients will be unable to read from repository
  starting in-place swap of repository data
  replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
  replacing store...
  store replacement complete; repository was inconsistent for *s (glob)
  finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
  copy of old repository backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
  the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

Original requirements backed up

  $ cat .hg/upgradebackup.*/requires
  dotencode
  fncache
  revlogv1
  store

generaldelta added to original requirements file

  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  store

store directory has files we expect

  $ ls .hg/store
  00changelog.i
  00manifest.i
  data
  fncache
  phaseroots
  undo
  undo.backupfiles
  undo.phaseroots

manifest should be generaldelta

  $ hg debugrevlog -m | grep flags
  flags : inline, generaldelta

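(The "generaldelta" flag on the manifest revlog confirms the migrated data
was rewritten under the new delta policy rather than copied byte-for-byte.)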
verify should be happy

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  3 files, 3 changesets, 3 total revisions

old store should be backed up

  $ ls .hg/upgradebackup.*/store
  00changelog.i
  00manifest.i
  data
  fncache
  phaseroots
  undo
  undo.backup.fncache
  undo.backupfiles
  undo.phaseroots

  $ cd ..

store files with special filenames aren't encoded during copy

  $ hg init store-filenames
  $ cd store-filenames
  $ touch foo
  $ hg -q commit -A -m initial
  $ touch .hg/store/.XX_special_filename

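(This filename is chosen deliberately: the store's filename encoding maps it
to ~2e_x_x__special__filename, which is exactly the nonexistent path the
pre-fix code aborts on in the old output below.)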
  $ hg debugupgraderepo --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, store

  beginning upgrade...
  repository locked and read-only
  creating temporary repository to stage migrated data: $TESTTMP/store-filenames/.hg/upgrade.* (glob)
  (it is safe to interrupt this process any time before data migration completes)
  migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
  migrating 109 bytes in store; 107 bytes tracked data
  migrating 1 filelogs containing 1 revisions (0 bytes in store; 0 bytes tracked data)
  finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
  migrating 1 manifests containing 1 revisions (46 bytes in store; 45 bytes tracked data)
  finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
  migrating changelog containing 1 revisions (63 bytes in store; 62 bytes tracked data)
  finished migrating 1 changelog revisions; change in size: 0 bytes
  finished migrating 3 total revisions; total change in store size: 0 bytes
  copying .XX_special_filename
+ copying phaseroots
+ data fully migrated to temporary repository
+ marking source repository as being upgraded; clients will be unable to read from repository
+ starting in-place swap of repository data
+ replaced files will be backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
+ replacing store...
+ store replacement complete; repository was inconsistent for *s (glob)
+ finalizing requirements file and making repository readable again
  removing temporary repository $TESTTMP/store-filenames/.hg/upgrade.* (glob)
- abort: No such file or directory: $TESTTMP/store-filenames/.hg/store/~2e_x_x__special__filename
- [255]
+ copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
+ the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified

  $ cd ..
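(The hunk above captures the fix: the old code joined the name returned by
readdir(), already an on-disk name, through the encoding vfs, so it looked
for the re-encoded path ~2e_x_x__special__filename and aborted; with rawvfs
the name is used verbatim and the upgrade completes.)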