repair: use context manager for lock management

Matt Harbison
r31626:0febf8e4 default
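This change replaces repair.py's hand-rolled lock/transaction cleanup (a try/finally that must call tr.close(), tr.release(), and lock.release() in the right order) with nested with-blocks, so both resources are released on every exit path, including exceptions. A minimal, self-contained sketch of the pattern, assuming nothing about Mercurial internals (the Transaction class and transaction() helper below are illustrative stand-ins, not Mercurial's API):

    import contextlib

    class Transaction(object):
        """Stand-in for a transaction object with explicit lifetime calls."""
        def close(self):
            print('committed')
        def release(self):
            print('released (rolls back if close() was never called)')

    @contextlib.contextmanager
    def transaction():
        # Package the old try/finally idiom once, at the definition site.
        tr = Transaction()
        try:
            yield tr
            tr.close()    # commit only if the body finished cleanly
        finally:
            tr.release()  # always runs, even when the body raises

    # Call sites then shrink to a single with-block:
    with transaction() as tr:
        print('recording bookmark change')

In the hunk below, repo.lock() and repo.transaction('repair') already return objects usable as context managers, which is what lets the nested with-statements replace the explicit tr.close()/tr.release()/lock.release() calls.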
@@ -1,1102 +1,1097 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import stat
import tempfile

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    changelog,
    error,
    exchange,
    manifest,
    obsolete,
    revlog,
    scmutil,
    util,
    vfs as vfsmod,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
-        lock = tr = None
-        try:
-            lock = repo.lock()
-            tr = repo.transaction('repair')
-            bm.recordchange(tr)
-            tr.close()
-        finally:
-            tr.release()
-            lock.release()
+
+        with repo.lock():
+            with repo.transaction('repair') as tr:
+                bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n

def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slowdown should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))
598
593
599 improvements.append(upgradeimprovement(
594 improvements.append(upgradeimprovement(
600 name='redeltaall',
595 name='redeltaall',
601 type=optimisation,
596 type=optimisation,
602 description=_('deltas within internal storage will always be '
597 description=_('deltas within internal storage will always be '
603 'recalculated without reusing prior deltas; this will '
598 'recalculated without reusing prior deltas; this will '
604 'likely make execution run several times slower; this '
599 'likely make execution run several times slower; this '
605 'optimization is typically not needed'),
600 'optimization is typically not needed'),
606 upgrademessage=_('deltas within internal storage will be fully '
601 upgrademessage=_('deltas within internal storage will be fully '
607 'recomputed; this will likely drastically slow down '
602 'recomputed; this will likely drastically slow down '
608 'execution time')))
603 'execution time')))
609
604
610 return improvements
605 return improvements
611
606
612 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
607 def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
613 optimize):
608 optimize):
614 """Determine upgrade actions that will be performed.
609 """Determine upgrade actions that will be performed.
615
610
616 Given a list of improvements as returned by ``upgradefindimprovements``,
611 Given a list of improvements as returned by ``upgradefindimprovements``,
617 determine the list of upgrade actions that will be performed.
612 determine the list of upgrade actions that will be performed.
618
613
619 The role of this function is to filter improvements if needed, apply
614 The role of this function is to filter improvements if needed, apply
620 recommended optimizations from the improvements list that make sense,
615 recommended optimizations from the improvements list that make sense,
621 etc.
616 etc.
622
617
623 Returns a list of action names.
618 Returns a list of action names.
624 """
619 """
625 newactions = []
620 newactions = []
626
621
627 knownreqs = upgradesupporteddestrequirements(repo)
622 knownreqs = upgradesupporteddestrequirements(repo)
628
623
629 for i in improvements:
624 for i in improvements:
630 name = i.name
625 name = i.name
631
626
632 # If the action is a requirement that doesn't show up in the
627 # If the action is a requirement that doesn't show up in the
633 # destination requirements, prune the action.
628 # destination requirements, prune the action.
634 if name in knownreqs and name not in destreqs:
629 if name in knownreqs and name not in destreqs:
635 continue
630 continue
636
631
637 if i.type == deficiency:
632 if i.type == deficiency:
638 newactions.append(name)
633 newactions.append(name)
639
634
640 newactions.extend(o for o in sorted(optimize) if o not in newactions)
635 newactions.extend(o for o in sorted(optimize) if o not in newactions)
641
636
642 # FUTURE consider adding some optimizations here for certain transitions.
637 # FUTURE consider adding some optimizations here for certain transitions.
643 # e.g. adding generaldelta could schedule parent redeltas.
638 # e.g. adding generaldelta could schedule parent redeltas.
644
639
645 return newactions
640 return newactions
646
641
647 def _revlogfrompath(repo, path):
642 def _revlogfrompath(repo, path):
648 """Obtain a revlog from a repo path.
643 """Obtain a revlog from a repo path.
649
644
650 An instance of the appropriate class is returned.
645 An instance of the appropriate class is returned.
651 """
646 """
652 if path == '00changelog.i':
647 if path == '00changelog.i':
653 return changelog.changelog(repo.svfs)
648 return changelog.changelog(repo.svfs)
654 elif path.endswith('00manifest.i'):
649 elif path.endswith('00manifest.i'):
655 mandir = path[:-len('00manifest.i')]
650 mandir = path[:-len('00manifest.i')]
656 return manifest.manifestrevlog(repo.svfs, dir=mandir)
651 return manifest.manifestrevlog(repo.svfs, dir=mandir)
657 else:
652 else:
658 # Filelogs don't do anything special with settings. So we can use a
653 # Filelogs don't do anything special with settings. So we can use a
659 # vanilla revlog.
654 # vanilla revlog.
660 return revlog.revlog(repo.svfs, path)
655 return revlog.revlog(repo.svfs, path)
661
656
662 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
657 def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
663 """Copy revlogs between 2 repos."""
658 """Copy revlogs between 2 repos."""
664 revcount = 0
659 revcount = 0
665 srcsize = 0
660 srcsize = 0
666 srcrawsize = 0
661 srcrawsize = 0
667 dstsize = 0
662 dstsize = 0
668 fcount = 0
663 fcount = 0
669 frevcount = 0
664 frevcount = 0
670 fsrcsize = 0
665 fsrcsize = 0
671 frawsize = 0
666 frawsize = 0
672 fdstsize = 0
667 fdstsize = 0
673 mcount = 0
668 mcount = 0
674 mrevcount = 0
669 mrevcount = 0
675 msrcsize = 0
670 msrcsize = 0
676 mrawsize = 0
671 mrawsize = 0
677 mdstsize = 0
672 mdstsize = 0
678 crevcount = 0
673 crevcount = 0
679 csrcsize = 0
674 csrcsize = 0
680 crawsize = 0
675 crawsize = 0
681 cdstsize = 0
676 cdstsize = 0
682
677
683 # Perform a pass to collect metadata. This validates we can open all
678 # Perform a pass to collect metadata. This validates we can open all
684 # source files and allows a unified progress bar to be displayed.
679 # source files and allows a unified progress bar to be displayed.
685 for unencoded, encoded, size in srcrepo.store.walk():
680 for unencoded, encoded, size in srcrepo.store.walk():
686 if unencoded.endswith('.d'):
681 if unencoded.endswith('.d'):
687 continue
682 continue
688
683
689 rl = _revlogfrompath(srcrepo, unencoded)
684 rl = _revlogfrompath(srcrepo, unencoded)
690 revcount += len(rl)
685 revcount += len(rl)
691
686
692 datasize = 0
687 datasize = 0
693 rawsize = 0
688 rawsize = 0
694 idx = rl.index
689 idx = rl.index
695 for rev in rl:
690 for rev in rl:
696 e = idx[rev]
691 e = idx[rev]
697 datasize += e[1]
692 datasize += e[1]
698 rawsize += e[2]
693 rawsize += e[2]
699
694
700 srcsize += datasize
695 srcsize += datasize
701 srcrawsize += rawsize
696 srcrawsize += rawsize
702
697
703 # This is for the separate progress bars.
698 # This is for the separate progress bars.
704 if isinstance(rl, changelog.changelog):
699 if isinstance(rl, changelog.changelog):
705 crevcount += len(rl)
700 crevcount += len(rl)
706 csrcsize += datasize
701 csrcsize += datasize
707 crawsize += rawsize
702 crawsize += rawsize
708 elif isinstance(rl, manifest.manifestrevlog):
703 elif isinstance(rl, manifest.manifestrevlog):
709 mcount += 1
704 mcount += 1
710 mrevcount += len(rl)
705 mrevcount += len(rl)
711 msrcsize += datasize
706 msrcsize += datasize
712 mrawsize += rawsize
707 mrawsize += rawsize
713 elif isinstance(rl, revlog.revlog):
708 elif isinstance(rl, revlog.revlog):
714 fcount += 1
709 fcount += 1
715 frevcount += len(rl)
710 frevcount += len(rl)
716 fsrcsize += datasize
711 fsrcsize += datasize
717 frawsize += rawsize
712 frawsize += rawsize
718
713
719 if not revcount:
714 if not revcount:
720 return
715 return
721
716
722 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
717 ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
723 '%d in changelog)\n') %
718 '%d in changelog)\n') %
724 (revcount, frevcount, mrevcount, crevcount))
719 (revcount, frevcount, mrevcount, crevcount))
725 ui.write(_('migrating %s in store; %s tracked data\n') % (
720 ui.write(_('migrating %s in store; %s tracked data\n') % (
726 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
721 (util.bytecount(srcsize), util.bytecount(srcrawsize))))
727
722
728 # Used to keep track of progress.
723 # Used to keep track of progress.
729 progress = []
724 progress = []
730 def oncopiedrevision(rl, rev, node):
725 def oncopiedrevision(rl, rev, node):
731 progress[1] += 1
726 progress[1] += 1
732 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
727 srcrepo.ui.progress(progress[0], progress[1], total=progress[2])
733
728
734 # Do the actual copying.
729 # Do the actual copying.
735 # FUTURE this operation can be farmed off to worker processes.
730 # FUTURE this operation can be farmed off to worker processes.
736 seen = set()
731 seen = set()
737 for unencoded, encoded, size in srcrepo.store.walk():
732 for unencoded, encoded, size in srcrepo.store.walk():
738 if unencoded.endswith('.d'):
733 if unencoded.endswith('.d'):
739 continue
734 continue
740
735
741 oldrl = _revlogfrompath(srcrepo, unencoded)
736 oldrl = _revlogfrompath(srcrepo, unencoded)
742 newrl = _revlogfrompath(dstrepo, unencoded)
737 newrl = _revlogfrompath(dstrepo, unencoded)
743
738
744 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
739 if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
745 ui.write(_('finished migrating %d manifest revisions across %d '
740 ui.write(_('finished migrating %d manifest revisions across %d '
746 'manifests; change in size: %s\n') %
741 'manifests; change in size: %s\n') %
747 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
742 (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))
748
743
749 ui.write(_('migrating changelog containing %d revisions '
744 ui.write(_('migrating changelog containing %d revisions '
750 '(%s in store; %s tracked data)\n') %
745 '(%s in store; %s tracked data)\n') %
751 (crevcount, util.bytecount(csrcsize),
746 (crevcount, util.bytecount(csrcsize),
752 util.bytecount(crawsize)))
747 util.bytecount(crawsize)))
753 seen.add('c')
748 seen.add('c')
754 progress[:] = [_('changelog revisions'), 0, crevcount]
749 progress[:] = [_('changelog revisions'), 0, crevcount]
755 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
750 elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
756 ui.write(_('finished migrating %d filelog revisions across %d '
751 ui.write(_('finished migrating %d filelog revisions across %d '
757 'filelogs; change in size: %s\n') %
752 'filelogs; change in size: %s\n') %
758 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
753 (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))
759
754
760 ui.write(_('migrating %d manifests containing %d revisions '
755 ui.write(_('migrating %d manifests containing %d revisions '
761 '(%s in store; %s tracked data)\n') %
756 '(%s in store; %s tracked data)\n') %
762 (mcount, mrevcount, util.bytecount(msrcsize),
757 (mcount, mrevcount, util.bytecount(msrcsize),
763 util.bytecount(mrawsize)))
758 util.bytecount(mrawsize)))
764 seen.add('m')
759 seen.add('m')
765 progress[:] = [_('manifest revisions'), 0, mrevcount]
760 progress[:] = [_('manifest revisions'), 0, mrevcount]
766 elif 'f' not in seen:
761 elif 'f' not in seen:
767 ui.write(_('migrating %d filelogs containing %d revisions '
762 ui.write(_('migrating %d filelogs containing %d revisions '
768 '(%s in store; %s tracked data)\n') %
763 '(%s in store; %s tracked data)\n') %
769 (fcount, frevcount, util.bytecount(fsrcsize),
764 (fcount, frevcount, util.bytecount(fsrcsize),
770 util.bytecount(frawsize)))
765 util.bytecount(frawsize)))
771 seen.add('f')
766 seen.add('f')
772 progress[:] = [_('file revisions'), 0, frevcount]
767 progress[:] = [_('file revisions'), 0, frevcount]
773
768
774 ui.progress(progress[0], progress[1], total=progress[2])
769 ui.progress(progress[0], progress[1], total=progress[2])
775
770
776 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
771 ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
777 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
772 oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
778 deltareuse=deltareuse,
773 deltareuse=deltareuse,
779 aggressivemergedeltas=aggressivemergedeltas)
774 aggressivemergedeltas=aggressivemergedeltas)
780
775
781 datasize = 0
776 datasize = 0
782 idx = newrl.index
777 idx = newrl.index
783 for rev in newrl:
778 for rev in newrl:
784 datasize += idx[rev][1]
779 datasize += idx[rev][1]
785
780
786 dstsize += datasize
781 dstsize += datasize
787
782
788 if isinstance(newrl, changelog.changelog):
783 if isinstance(newrl, changelog.changelog):
789 cdstsize += datasize
784 cdstsize += datasize
790 elif isinstance(newrl, manifest.manifestrevlog):
785 elif isinstance(newrl, manifest.manifestrevlog):
791 mdstsize += datasize
786 mdstsize += datasize
792 else:
787 else:
793 fdstsize += datasize
788 fdstsize += datasize
794
789
795 ui.progress(progress[0], None)
790 ui.progress(progress[0], None)
796
791
797 ui.write(_('finished migrating %d changelog revisions; change in size: '
792 ui.write(_('finished migrating %d changelog revisions; change in size: '
798 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
793 '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))
799
794
800 ui.write(_('finished migrating %d total revisions; total change in store '
795 ui.write(_('finished migrating %d total revisions; total change in store '
801 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
796 'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
802
797
803 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
798 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
804 """Determine whether to copy a store file during upgrade.
799 """Determine whether to copy a store file during upgrade.
805
800
806 This function is called when migrating store files from ``srcrepo`` to
801 This function is called when migrating store files from ``srcrepo`` to
807 ``dstrepo`` as part of upgrading a repository.
802 ``dstrepo`` as part of upgrading a repository.
808
803
809 Args:
804 Args:
810 srcrepo: repo we are copying from
805 srcrepo: repo we are copying from
811 dstrepo: repo we are copying to
806 dstrepo: repo we are copying to
812 requirements: set of requirements for ``dstrepo``
807 requirements: set of requirements for ``dstrepo``
813 path: store file being examined
808 path: store file being examined
814 mode: the ``ST_MODE`` file type of ``path``
809 mode: the ``ST_MODE`` file type of ``path``
815 st: ``stat`` data structure for ``path``
810 st: ``stat`` data structure for ``path``
816
811
817 Function should return ``True`` if the file is to be copied.
812 Function should return ``True`` if the file is to be copied.
818 """
813 """
819 # Skip revlogs.
814 # Skip revlogs.
820 if path.endswith(('.i', '.d')):
815 if path.endswith(('.i', '.d')):
821 return False
816 return False
822 # Skip transaction related files.
817 # Skip transaction related files.
823 if path.startswith('undo'):
818 if path.startswith('undo'):
824 return False
819 return False
825 # Only copy regular files.
820 # Only copy regular files.
826 if mode != stat.S_IFREG:
821 if mode != stat.S_IFREG:
827 return False
822 return False
828 # Skip other skipped files.
823 # Skip other skipped files.
829 if path in ('lock', 'fncache'):
824 if path in ('lock', 'fncache'):
830 return False
825 return False
831
826
832 return True
827 return True
833
828
834 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
829 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
835 """Hook point for extensions to perform additional actions during upgrade.
830 """Hook point for extensions to perform additional actions during upgrade.
836
831
837 This function is called after revlogs and store files have been copied but
832 This function is called after revlogs and store files have been copied but
838 before the new store is swapped into the original location.
833 before the new store is swapped into the original location.
839 """
834 """
840
835
841 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
836 def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
842 """Do the low-level work of upgrading a repository.
837 """Do the low-level work of upgrading a repository.
843
838
844 The upgrade is effectively performed as a copy between a source
839 The upgrade is effectively performed as a copy between a source
845 repository and a temporary destination repository.
840 repository and a temporary destination repository.
846
841
847 The source repository is unmodified for as long as possible so the
842 The source repository is unmodified for as long as possible so the
848 upgrade can abort at any time without causing loss of service for
843 upgrade can abort at any time without causing loss of service for
849 readers and without corrupting the source repository.
844 readers and without corrupting the source repository.
850 """
845 """
851 assert srcrepo.currentwlock()
846 assert srcrepo.currentwlock()
852 assert dstrepo.currentwlock()
847 assert dstrepo.currentwlock()
853
848
854 ui.write(_('(it is safe to interrupt this process any time before '
849 ui.write(_('(it is safe to interrupt this process any time before '
855 'data migration completes)\n'))
850 'data migration completes)\n'))
856
851
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))
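    # A reader opening the repository now sees the transient
    # 'upgradeinprogress' requirement, which no client implements, and so
    # aborts with an unknown-requirement error instead of reading a
    # partially swapped store.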

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)
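    # Between the two renames above there is an instant when no store exists
    # at srcrepo.spath at all; that window is what the elapsed time measures,
    # and it is why clients were locked out via the requirements file first.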

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath

def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

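    # Illustrative example of the requirement set algebra above (hypothetical
    # values): with repo.requirements = {'revlogv1', 'store'} and
    # newreqs = {'revlogv1', 'store', 'generaldelta'}, nothing is removed and
    # only 'generaldelta' is added, so the upgrade proceeds iff 'generaldelta'
    # is in upgradeallowednewrequirements(repo) and newreqs is a subset of
    # upgradesupporteddestrequirements(repo).
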
    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_('   preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_('   removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_('   added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n   %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n   %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_('upgrade will perform the following actions:\n\n'))
    printrequirements()
    printupgradeactions()

    ui.write(_('beginning upgrade...\n'))
    with repo.wlock():
        with repo.lock():
            ui.write(_('repository locked and read-only\n'))
            # Our strategy for upgrading the repository is to create a new,
            # temporary repository, write data to it, then do a swap of the
            # data. There are less heavyweight ways to do this, but it is easier
            # to create a new repo object than to instantiate all the components
            # (like the store) separately.
            tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
            backuppath = None
            try:
                ui.write(_('creating temporary repository to stage migrated '
                           'data: %s\n') % tmppath)
                dstrepo = localrepo.localrepository(repo.baseui,
                                                    path=tmppath,
                                                    create=True)

                with dstrepo.wlock():
                    with dstrepo.lock():
                        backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
                                                  actions)

            finally:
                ui.write(_('removing temporary repository %s\n') % tmppath)
                repo.vfs.rmtree(tmppath, forcibly=True)

            if backuppath:
                ui.warn(_('copy of old repository backed up at %s\n') %
                        backuppath)
                ui.warn(_('the old repository will not be deleted; remove '
                          'it to free up disk space once the upgraded '
                          'repository is verified\n'))
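
# Usage note (contextual, assuming the command wiring of this era of the
# codebase): upgraderepo() is expected to be driven by the
# 'hg debugupgraderepo' command. A typical session is a dry run
# ('hg debugupgraderepo') to review the planned changes, then
# 'hg debugupgraderepo --run', optionally with '--optimize <name>' for one
# of the redelta* optimizations handled above.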