repair: directly use repo.vfs.join...
Pierre-Yves David
r31324:e712a9c3 default
@@ -1,1102 +1,1102 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import stat
import tempfile

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    changelog,
    error,
    exchange,
    manifest,
    obsolete,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
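    # the resulting name looks like, e.g.,
    # strip-backup/1a2b3c4d5e6f-2b118ee7-backup.hg (illustrative values:
    # short node, first 8 hex digits of the aggregate hash, then the suffix)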

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)
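    # every revision from striprev onward is truncated from the revlogs;
    # revisions that do not descend from a stripped one are restored from a
    # temporary bundle afterwards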

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
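            # incremental head tracking: once r is kept, its parents can no
            # longer be heads of the saved set, while r itself may be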
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'
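        # no parent of the stripped set survives, so fall back to the
        # working directory parent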

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

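            # apply the on-disk truncations recorded by the strip calls above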
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass
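        # (any membership test works here; it merely forces the lazily
        # loaded fncache file to be parsed)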

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f
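                # each filelog has an index (.i) and, when the revision data
                # does not fit inline, a separate data file (.d)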

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want removed. A new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n

def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break
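            # one delta'ed changelog revision is enough to flag the
            # deficiency, so stop scanning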

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slowdown should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements

def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = upgradesupporteddestrequirements(repo)

    for i in improvements:
        name = i.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        if i.type == deficiency:
            newactions.append(name)

    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    elif path.endswith('00manifest.i'):
        mandir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=mandir)
    else:
        # Filelogs don't do anything special with settings. So we can use a
        # vanilla revlog.
        return revlog.revlog(repo.svfs, path)

def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    progress = []
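    # mutable [topic, pos, total] list shared with the closure below
    # (Python 2 lacks 'nonlocal', so the list is mutated in place)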
    def oncopiedrevision(rl, rev, node):
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))

def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith(('.i', '.d')):
        return False
    # Skip transaction related files.
    if path.startswith('undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other files that must not be copied.
    if path in ('lock', 'fncache'):
        return False

    return True

def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """

def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
863 else:
863 else:
864 deltareuse = revlog.revlog.DELTAREUSEALWAYS
864 deltareuse = revlog.revlog.DELTAREUSEALWAYS
865
865
866 with dstrepo.transaction('upgrade') as tr:
866 with dstrepo.transaction('upgrade') as tr:
867 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
867 _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
868 'redeltamultibase' in actions)
868 'redeltamultibase' in actions)
869
869
870 # Now copy other files in the store directory.
870 # Now copy other files in the store directory.
871 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
871 for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
872 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
872 if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
873 p, kind, st):
873 p, kind, st):
874 continue
874 continue
875
875
876 srcrepo.ui.write(_('copying %s\n') % p)
876 srcrepo.ui.write(_('copying %s\n') % p)
877 src = srcrepo.store.vfs.join(p)
877 src = srcrepo.store.vfs.join(p)
878 dst = dstrepo.store.vfs.join(p)
878 dst = dstrepo.store.vfs.join(p)
879 util.copyfile(src, dst, copystat=True)
879 util.copyfile(src, dst, copystat=True)
880
880
881 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
881 _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)
882
882
883 ui.write(_('data fully migrated to temporary repository\n'))
883 ui.write(_('data fully migrated to temporary repository\n'))
884
884
885 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
885 backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
886 backupvfs = vfsmod.vfs(backuppath)
886 backupvfs = vfsmod.vfs(backuppath)
887
887
888 # Make a backup of requires file first, as it is the first to be modified.
888 # Make a backup of requires file first, as it is the first to be modified.
889 util.copyfile(srcrepo.join('requires'), backupvfs.join('requires'))
889 util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))
890
890
891 # We install an arbitrary requirement that clients must not support
891 # We install an arbitrary requirement that clients must not support
892 # as a mechanism to lock out new clients during the data swap. This is
892 # as a mechanism to lock out new clients during the data swap. This is
893 # better than allowing a client to continue while the repository is in
893 # better than allowing a client to continue while the repository is in
894 # an inconsistent state.
894 # an inconsistent state.
895 ui.write(_('marking source repository as being upgraded; clients will be '
895 ui.write(_('marking source repository as being upgraded; clients will be '
896 'unable to read from repository\n'))
896 'unable to read from repository\n'))
897 scmutil.writerequires(srcrepo.vfs,
897 scmutil.writerequires(srcrepo.vfs,
898 srcrepo.requirements | set(['upgradeinprogress']))
898 srcrepo.requirements | set(['upgradeinprogress']))
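    # A reader that opens the repository at this point should abort with an
    # error along the lines of "abort: repository requires features unknown
    # to this Mercurial: upgradeinprogress!", since no client implements an
    # 'upgradeinprogress' requirement.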

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
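    # After these two renames, the backup directory holds the original store
    # and '.hg/store' now contains the upgraded data; readers are still
    # locked out by the 'upgradeinprogress' requirement written above.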
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)
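    # newreporequirements() computes the requirements a freshly created
    # repository would receive under the current configuration; that set is
    # the target of the upgrade.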

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
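        # Sample of the output written below (requirement names vary by
        # repository and configuration):
        #
        #   requirements
        #      preserved: dotencode, fncache, revlogv1, store
        #      added: generaldelta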
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

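        # Classify the improvements: deficiencies are features the repository
        # lacks (tracked by whether the current config or only the defaults
        # recommend them), while optimizations are optional actions the user
        # must request explicitly.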
        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

                if backuppath:
                    ui.warn(_('copy of old repository backed up at %s\n') %
                            backuppath)
                    ui.warn(_('the old repository will not be deleted; remove '
                              'it to free up disk space once the upgraded '
                              'repository is verified\n'))