##// END OF EJS Templates
repair: use ProgrammingError
Jun Wu -
r31645:7095e783 default
parent child Browse files
Show More
@@ -1,1097 +1,1096 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import stat
13 import stat
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import short
17 from .node import short
18 from . import (
18 from . import (
19 bundle2,
19 bundle2,
20 changegroup,
20 changegroup,
21 changelog,
21 changelog,
22 error,
22 error,
23 exchange,
23 exchange,
24 manifest,
24 manifest,
25 obsolete,
25 obsolete,
26 revlog,
26 revlog,
27 scmutil,
27 scmutil,
28 util,
28 util,
29 vfs as vfsmod,
29 vfs as vfsmod,
30 )
30 )
31
31
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written under the repository's ``strip-backup``
    directory; the filename embeds the short node, a hash of all bundled
    nodes (for uniqueness) and ``suffix``.  Returns the bundle path
    relative to ``repo.vfs``.
    """
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Pick the bundle/compression type supported by the changegroup version.
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
60
60
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Returns the sorted list of all file names touched by any changeset
    with revision number >= ``striprev``.
    """
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
69
69
70 def _collectbrokencsets(repo, files, striprev):
70 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
71 """return the changesets which will be broken by the truncation"""
72 s = set()
72 s = set()
73 def collectone(revlog):
73 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
74 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
75 s.update([revlog.linkrev(r) for r in brokenset])
76
76
77 collectone(repo.manifestlog._revlog)
77 collectone(repo.manifestlog._revlog)
78 for fname in files:
78 for fname in files:
79 collectone(repo.file(fname))
79 collectone(repo.file(fname))
80
80
81 return s
81 return s
82
82
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the given changesets (and their descendants) from the repo

    Returns the backup bundle path (or None if 'backup' was False) so
    extensions can use it.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        raise error.ProgrammingError('cannot strip from inside a transaction')

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            # safe but unnecessary otherwise
            if 'treemanifest' in repo.requirements:
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate any files the transaction grew past their pre-strip
            # size, and drop files truncated to nothing from the store
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # move bookmarks that pointed at stripped changesets
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.lock():
            with repo.transaction('repair') as tr:
                bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
250
249
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record which .i/.d store files actually
        # exist for the files it touches.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
322
321
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
334
333
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition markers into kept ('left') and deleted (counted in 'n').
    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
363
362
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])
376
375
def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirements in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])
394
393
def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    # No requirement may currently be dropped by an upgrade.
    return set()
403
402
def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])
419
418
def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])
435
434
# Improvement categories used by upgradeimprovement.type. Note the British
# spelling of the name vs. the American spelling of the value for
# 'optimisation' — both are preserved as-is since the values are
# machine-readable identifiers.
deficiency = 'deficiency'
optimisation = 'optimization'
438
437
class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        # Any extra keyword arguments (e.g. fromdefault/fromconfig) become
        # plain instance attributes.
        for k, v in kwargs.items():
            setattr(self, k, v)
479
478
def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.

    Deficiencies (missing ``fncache``/``dotencode``/``generaldelta``
    requirements, changelogs still stored as delta chains) are appended only
    when detected.  The three ``redelta*`` optimisations are appended
    unconditionally; ``upgradedetermineactions`` decides later which of them
    actually run.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    # Requirements a freshly-created repo would get with current config;
    # used to tell "missing by default" apart from "missing by config".
    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    # NOTE: the scan stops at the first delta-chained revision found (break),
    # so the cost is bounded on already-converted repositories.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformated to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revision and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the amount '
                      'of files in the repository; this slow down should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements
606
605
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    knownreqs = upgradesupporteddestrequirements(repo)

    actions = []
    for improvement in improvements:
        name = improvement.name

        # An action that corresponds to a known requirement which is not
        # present in the destination requirements is pruned.
        if name in knownreqs and name not in destreqs:
            continue

        # Every surviving deficiency is always scheduled.
        if improvement.type == deficiency:
            actions.append(name)

    # Explicitly requested optimizations are appended after the deficiency
    # fixes, deduplicated against anything already scheduled.
    for optname in sorted(optimize):
        if optname not in actions:
            actions.append(optname)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return actions
641
640
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate revlog class is returned depending on
    where ``path`` points inside the store.
    """
    if path == '00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith('00manifest.i'):
        # Everything before the '00manifest.i' suffix (empty for the root
        # manifest) is passed along as the manifest directory.
        treedir = path[:-len('00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, dir=treedir)
    # Filelogs don't do anything special with settings, so a vanilla
    # revlog can be used.
    return revlog.revlog(repo.svfs, path)
656
655
def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, aggressivemergedeltas):
    """Copy revlogs between 2 repos.

    Runs in two passes over ``srcrepo``'s store: a metadata pass that counts
    revisions and sizes per category (changelog / manifests / filelogs) --
    which also validates that every revlog can be opened -- then a copy pass
    that clones each revlog into ``dstrepo`` inside transaction ``tr`` while
    reporting progress and per-category size changes.

    ``deltareuse`` and ``aggressivemergedeltas`` are forwarded to
    ``revlog.clone()`` to control how deltas are recomputed.
    """
    # Global counters: revision count, in-store size, tracked-data size,
    # destination size.
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    # Per-category counters: f* = filelogs, m* = manifests, c* = changelog.
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in srcrepo.store.walk():
        # Skip revlog data files; each revlog is handled through its
        # '.i' index entry below.
        if unencoded.endswith('.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)
        revcount += len(rl)

        # Sum the stored size (e[1]) and raw size (e[2]) recorded in the
        # revlog index for every revision.
        datasize = 0
        rawsize = 0
        idx = rl.index
        for rev in rl:
            e = idx[rev]
            datasize += e[1]
            rawsize += e[2]

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, revlog.revlog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize

    # Nothing to migrate: stay silent and bail out early.
    if not revcount:
        return

    ui.write(_('migrating %d total revisions (%d in filelogs, %d in manifests, '
               '%d in changelog)\n') %
             (revcount, frevcount, mrevcount, crevcount))
    ui.write(_('migrating %s in store; %s tracked data\n') % (
             (util.bytecount(srcsize), util.bytecount(srcrawsize))))

    # Used to keep track of progress.
    # Shared mutable [topic, position, total]; the nested callback below
    # mutates it in place so reassignments via progress[:] are visible.
    progress = []
    def oncopiedrevision(rl, rev, node):
        # Invoked by revlog.clone() for each copied revision.
        progress[1] += 1
        srcrepo.ui.progress(progress[0], progress[1], total=progress[2])

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    # The 'seen' markers ('f'/'m'/'c') detect the first revlog of each
    # category so the summary for the previous category can be printed.
    # NOTE(review): this relies on store.walk() yielding filelogs before
    # manifests before the changelog -- confirm against store ordering.
    seen = set()
    for unencoded, encoded, size in srcrepo.store.walk():
        if unencoded.endswith('.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)
        newrl = _revlogfrompath(dstrepo, unencoded)

        if isinstance(oldrl, changelog.changelog) and 'c' not in seen:
            ui.write(_('finished migrating %d manifest revisions across %d '
                       'manifests; change in size: %s\n') %
                     (mrevcount, mcount, util.bytecount(mdstsize - msrcsize)))

            ui.write(_('migrating changelog containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (crevcount, util.bytecount(csrcsize),
                      util.bytecount(crawsize)))
            seen.add('c')
            progress[:] = [_('changelog revisions'), 0, crevcount]
        elif isinstance(oldrl, manifest.manifestrevlog) and 'm' not in seen:
            ui.write(_('finished migrating %d filelog revisions across %d '
                       'filelogs; change in size: %s\n') %
                     (frevcount, fcount, util.bytecount(fdstsize - fsrcsize)))

            ui.write(_('migrating %d manifests containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (mcount, mrevcount, util.bytecount(msrcsize),
                      util.bytecount(mrawsize)))
            seen.add('m')
            progress[:] = [_('manifest revisions'), 0, mrevcount]
        elif 'f' not in seen:
            ui.write(_('migrating %d filelogs containing %d revisions '
                       '(%s in store; %s tracked data)\n') %
                     (fcount, frevcount, util.bytecount(fsrcsize),
                      util.bytecount(frawsize)))
            seen.add('f')
            progress[:] = [_('file revisions'), 0, frevcount]

        ui.progress(progress[0], progress[1], total=progress[2])

        ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
        oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
                    deltareuse=deltareuse,
                    aggressivemergedeltas=aggressivemergedeltas)

        # Measure the size of the freshly-written revlog for the summaries.
        datasize = 0
        idx = newrl.index
        for rev in newrl:
            datasize += idx[rev][1]

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    # Close out the progress bar.
    ui.progress(progress[0], None)

    ui.write(_('finished migrating %d changelog revisions; change in size: '
               '%s\n') % (crevcount, util.bytecount(cdstsize - csrcsize)))

    ui.write(_('finished migrating %d total revisions; total change in store '
               'size: %s\n') % (revcount, util.bytecount(dstsize - srcsize)))
797
796
798 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
797 def _upgradefilterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
799 """Determine whether to copy a store file during upgrade.
798 """Determine whether to copy a store file during upgrade.
800
799
801 This function is called when migrating store files from ``srcrepo`` to
800 This function is called when migrating store files from ``srcrepo`` to
802 ``dstrepo`` as part of upgrading a repository.
801 ``dstrepo`` as part of upgrading a repository.
803
802
804 Args:
803 Args:
805 srcrepo: repo we are copying from
804 srcrepo: repo we are copying from
806 dstrepo: repo we are copying to
805 dstrepo: repo we are copying to
807 requirements: set of requirements for ``dstrepo``
806 requirements: set of requirements for ``dstrepo``
808 path: store file being examined
807 path: store file being examined
809 mode: the ``ST_MODE`` file type of ``path``
808 mode: the ``ST_MODE`` file type of ``path``
810 st: ``stat`` data structure for ``path``
809 st: ``stat`` data structure for ``path``
811
810
812 Function should return ``True`` if the file is to be copied.
811 Function should return ``True`` if the file is to be copied.
813 """
812 """
814 # Skip revlogs.
813 # Skip revlogs.
815 if path.endswith(('.i', '.d')):
814 if path.endswith(('.i', '.d')):
816 return False
815 return False
817 # Skip transaction related files.
816 # Skip transaction related files.
818 if path.startswith('undo'):
817 if path.startswith('undo'):
819 return False
818 return False
820 # Only copy regular files.
819 # Only copy regular files.
821 if mode != stat.S_IFREG:
820 if mode != stat.S_IFREG:
822 return False
821 return False
823 # Skip other skipped files.
822 # Skip other skipped files.
824 if path in ('lock', 'fncache'):
823 if path in ('lock', 'fncache'):
825 return False
824 return False
826
825
827 return True
826 return True
828
827
829 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
828 def _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements):
830 """Hook point for extensions to perform additional actions during upgrade.
829 """Hook point for extensions to perform additional actions during upgrade.
831
830
832 This function is called after revlogs and store files have been copied but
831 This function is called after revlogs and store files have been copied but
833 before the new store is swapped into the original location.
832 before the new store is swapped into the original location.
834 """
833 """
835
834
def _upgraderepo(ui, srcrepo, dstrepo, requirements, actions):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.

    Returns the path of the backup directory holding the replaced files.
    """
    # Both repositories must already be write-locked by the caller.
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.write(_('(it is safe to interrupt this process any time before '
               'data migration completes)\n'))

    # Map the requested redelta actions to a revlog delta-reuse policy.
    # Precedence: redeltaall > redeltaparent > redeltamultibase; the
    # multibase aspect itself is passed to _copyrevlogs as
    # aggressivemergedeltas below.
    if 'redeltaall' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif 'redeltaparent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif 'redeltamultibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    # All writes to the temporary repo happen inside one transaction.
    with dstrepo.transaction('upgrade') as tr:
        _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
                     'redeltamultibase' in actions)

        # Now copy other files in the store directory.
        for p, kind, st in srcrepo.store.vfs.readdir('', stat=True):
            if not _upgradefilterstorefile(srcrepo, dstrepo, requirements,
                                           p, kind, st):
                continue

            srcrepo.ui.write(_('copying %s\n') % p)
            src = srcrepo.store.vfs.join(p)
            dst = dstrepo.store.vfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _upgradefinishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.write(_('data fully migrated to temporary repository\n'))

    # From here on the source repository is modified; replaced pieces are
    # preserved under a dedicated backup directory.
    backuppath = tempfile.mkdtemp(prefix='upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join('requires'), backupvfs.join('requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.write(_('marking source repository as being upgraded; clients will be '
               'unable to read from repository\n'))
    scmutil.writerequires(srcrepo.vfs,
                          srcrepo.requirements | set(['upgradeinprogress']))

    ui.write(_('starting in-place swap of repository data\n'))
    ui.write(_('replaced files will be backed up at %s\n') %
             backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.write(_('replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join('store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.write(_('store replacement complete; repository was inconsistent for '
               '%0.1fs\n') % elapsed)

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.write(_('finalizing requirements file and making repository readable '
               'again\n'))
    scmutil.writerequires(srcrepo.vfs, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink('store/lock')

    return backuppath
923
922
924 def upgraderepo(ui, repo, run=False, optimize=None):
923 def upgraderepo(ui, repo, run=False, optimize=None):
925 """Upgrade a repository in place."""
924 """Upgrade a repository in place."""
926 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
925 # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
927 from . import localrepo
926 from . import localrepo
928
927
929 optimize = set(optimize or [])
928 optimize = set(optimize or [])
930 repo = repo.unfiltered()
929 repo = repo.unfiltered()
931
930
932 # Ensure the repository can be upgraded.
931 # Ensure the repository can be upgraded.
933 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
932 missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
934 if missingreqs:
933 if missingreqs:
935 raise error.Abort(_('cannot upgrade repository; requirement '
934 raise error.Abort(_('cannot upgrade repository; requirement '
936 'missing: %s') % _(', ').join(sorted(missingreqs)))
935 'missing: %s') % _(', ').join(sorted(missingreqs)))
937
936
938 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
937 blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
939 if blockedreqs:
938 if blockedreqs:
940 raise error.Abort(_('cannot upgrade repository; unsupported source '
939 raise error.Abort(_('cannot upgrade repository; unsupported source '
941 'requirement: %s') %
940 'requirement: %s') %
942 _(', ').join(sorted(blockedreqs)))
941 _(', ').join(sorted(blockedreqs)))
943
942
944 # FUTURE there is potentially a need to control the wanted requirements via
943 # FUTURE there is potentially a need to control the wanted requirements via
945 # command arguments or via an extension hook point.
944 # command arguments or via an extension hook point.
946 newreqs = localrepo.newreporequirements(repo)
945 newreqs = localrepo.newreporequirements(repo)
947
946
948 noremovereqs = (repo.requirements - newreqs -
947 noremovereqs = (repo.requirements - newreqs -
949 upgradesupportremovedrequirements(repo))
948 upgradesupportremovedrequirements(repo))
950 if noremovereqs:
949 if noremovereqs:
951 raise error.Abort(_('cannot upgrade repository; requirement would be '
950 raise error.Abort(_('cannot upgrade repository; requirement would be '
952 'removed: %s') % _(', ').join(sorted(noremovereqs)))
951 'removed: %s') % _(', ').join(sorted(noremovereqs)))
953
952
954 noaddreqs = (newreqs - repo.requirements -
953 noaddreqs = (newreqs - repo.requirements -
955 upgradeallowednewrequirements(repo))
954 upgradeallowednewrequirements(repo))
956 if noaddreqs:
955 if noaddreqs:
957 raise error.Abort(_('cannot upgrade repository; do not support adding '
956 raise error.Abort(_('cannot upgrade repository; do not support adding '
958 'requirement: %s') %
957 'requirement: %s') %
959 _(', ').join(sorted(noaddreqs)))
958 _(', ').join(sorted(noaddreqs)))
960
959
961 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
960 unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
962 if unsupportedreqs:
961 if unsupportedreqs:
963 raise error.Abort(_('cannot upgrade repository; do not support '
962 raise error.Abort(_('cannot upgrade repository; do not support '
964 'destination requirement: %s') %
963 'destination requirement: %s') %
965 _(', ').join(sorted(unsupportedreqs)))
964 _(', ').join(sorted(unsupportedreqs)))
966
965
967 # Find and validate all improvements that can be made.
966 # Find and validate all improvements that can be made.
968 improvements = upgradefindimprovements(repo)
967 improvements = upgradefindimprovements(repo)
969 for i in improvements:
968 for i in improvements:
970 if i.type not in (deficiency, optimisation):
969 if i.type not in (deficiency, optimisation):
971 raise error.Abort(_('unexpected improvement type %s for %s') % (
970 raise error.Abort(_('unexpected improvement type %s for %s') % (
972 i.type, i.name))
971 i.type, i.name))
973
972
974 # Validate arguments.
973 # Validate arguments.
975 unknownoptimize = optimize - set(i.name for i in improvements
974 unknownoptimize = optimize - set(i.name for i in improvements
976 if i.type == optimisation)
975 if i.type == optimisation)
977 if unknownoptimize:
976 if unknownoptimize:
978 raise error.Abort(_('unknown optimization action requested: %s') %
977 raise error.Abort(_('unknown optimization action requested: %s') %
979 ', '.join(sorted(unknownoptimize)),
978 ', '.join(sorted(unknownoptimize)),
980 hint=_('run without arguments to see valid '
979 hint=_('run without arguments to see valid '
981 'optimizations'))
980 'optimizations'))
982
981
983 actions = upgradedetermineactions(repo, improvements, repo.requirements,
982 actions = upgradedetermineactions(repo, improvements, repo.requirements,
984 newreqs, optimize)
983 newreqs, optimize)
985
984
986 def printrequirements():
985 def printrequirements():
987 ui.write(_('requirements\n'))
986 ui.write(_('requirements\n'))
988 ui.write(_(' preserved: %s\n') %
987 ui.write(_(' preserved: %s\n') %
989 _(', ').join(sorted(newreqs & repo.requirements)))
988 _(', ').join(sorted(newreqs & repo.requirements)))
990
989
991 if repo.requirements - newreqs:
990 if repo.requirements - newreqs:
992 ui.write(_(' removed: %s\n') %
991 ui.write(_(' removed: %s\n') %
993 _(', ').join(sorted(repo.requirements - newreqs)))
992 _(', ').join(sorted(repo.requirements - newreqs)))
994
993
995 if newreqs - repo.requirements:
994 if newreqs - repo.requirements:
996 ui.write(_(' added: %s\n') %
995 ui.write(_(' added: %s\n') %
997 _(', ').join(sorted(newreqs - repo.requirements)))
996 _(', ').join(sorted(newreqs - repo.requirements)))
998
997
999 ui.write('\n')
998 ui.write('\n')
1000
999
1001 def printupgradeactions():
1000 def printupgradeactions():
1002 for action in actions:
1001 for action in actions:
1003 for i in improvements:
1002 for i in improvements:
1004 if i.name == action:
1003 if i.name == action:
1005 ui.write('%s\n %s\n\n' %
1004 ui.write('%s\n %s\n\n' %
1006 (i.name, i.upgrademessage))
1005 (i.name, i.upgrademessage))
1007
1006
1008 if not run:
1007 if not run:
1009 fromdefault = []
1008 fromdefault = []
1010 fromconfig = []
1009 fromconfig = []
1011 optimizations = []
1010 optimizations = []
1012
1011
1013 for i in improvements:
1012 for i in improvements:
1014 assert i.type in (deficiency, optimisation)
1013 assert i.type in (deficiency, optimisation)
1015 if i.type == deficiency:
1014 if i.type == deficiency:
1016 if i.fromdefault:
1015 if i.fromdefault:
1017 fromdefault.append(i)
1016 fromdefault.append(i)
1018 if i.fromconfig:
1017 if i.fromconfig:
1019 fromconfig.append(i)
1018 fromconfig.append(i)
1020 else:
1019 else:
1021 optimizations.append(i)
1020 optimizations.append(i)
1022
1021
1023 if fromdefault or fromconfig:
1022 if fromdefault or fromconfig:
1024 fromconfignames = set(x.name for x in fromconfig)
1023 fromconfignames = set(x.name for x in fromconfig)
1025 onlydefault = [i for i in fromdefault
1024 onlydefault = [i for i in fromdefault
1026 if i.name not in fromconfignames]
1025 if i.name not in fromconfignames]
1027
1026
1028 if fromconfig:
1027 if fromconfig:
1029 ui.write(_('repository lacks features recommended by '
1028 ui.write(_('repository lacks features recommended by '
1030 'current config options:\n\n'))
1029 'current config options:\n\n'))
1031 for i in fromconfig:
1030 for i in fromconfig:
1032 ui.write('%s\n %s\n\n' % (i.name, i.description))
1031 ui.write('%s\n %s\n\n' % (i.name, i.description))
1033
1032
1034 if onlydefault:
1033 if onlydefault:
1035 ui.write(_('repository lacks features used by the default '
1034 ui.write(_('repository lacks features used by the default '
1036 'config options:\n\n'))
1035 'config options:\n\n'))
1037 for i in onlydefault:
1036 for i in onlydefault:
1038 ui.write('%s\n %s\n\n' % (i.name, i.description))
1037 ui.write('%s\n %s\n\n' % (i.name, i.description))
1039
1038
1040 ui.write('\n')
1039 ui.write('\n')
1041 else:
1040 else:
1042 ui.write(_('(no feature deficiencies found in existing '
1041 ui.write(_('(no feature deficiencies found in existing '
1043 'repository)\n'))
1042 'repository)\n'))
1044
1043
1045 ui.write(_('performing an upgrade with "--run" will make the following '
1044 ui.write(_('performing an upgrade with "--run" will make the following '
1046 'changes:\n\n'))
1045 'changes:\n\n'))
1047
1046
1048 printrequirements()
1047 printrequirements()
1049 printupgradeactions()
1048 printupgradeactions()
1050
1049
1051 unusedoptimize = [i for i in improvements
1050 unusedoptimize = [i for i in improvements
1052 if i.name not in actions and i.type == optimisation]
1051 if i.name not in actions and i.type == optimisation]
1053 if unusedoptimize:
1052 if unusedoptimize:
1054 ui.write(_('additional optimizations are available by specifying '
1053 ui.write(_('additional optimizations are available by specifying '
1055 '"--optimize <name>":\n\n'))
1054 '"--optimize <name>":\n\n'))
1056 for i in unusedoptimize:
1055 for i in unusedoptimize:
1057 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1056 ui.write(_('%s\n %s\n\n') % (i.name, i.description))
1058 return
1057 return
1059
1058
1060 # Else we're in the run=true case.
1059 # Else we're in the run=true case.
1061 ui.write(_('upgrade will perform the following actions:\n\n'))
1060 ui.write(_('upgrade will perform the following actions:\n\n'))
1062 printrequirements()
1061 printrequirements()
1063 printupgradeactions()
1062 printupgradeactions()
1064
1063
1065 ui.write(_('beginning upgrade...\n'))
1064 ui.write(_('beginning upgrade...\n'))
1066 with repo.wlock():
1065 with repo.wlock():
1067 with repo.lock():
1066 with repo.lock():
1068 ui.write(_('repository locked and read-only\n'))
1067 ui.write(_('repository locked and read-only\n'))
1069 # Our strategy for upgrading the repository is to create a new,
1068 # Our strategy for upgrading the repository is to create a new,
1070 # temporary repository, write data to it, then do a swap of the
1069 # temporary repository, write data to it, then do a swap of the
1071 # data. There are less heavyweight ways to do this, but it is easier
1070 # data. There are less heavyweight ways to do this, but it is easier
1072 # to create a new repo object than to instantiate all the components
1071 # to create a new repo object than to instantiate all the components
1073 # (like the store) separately.
1072 # (like the store) separately.
1074 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1073 tmppath = tempfile.mkdtemp(prefix='upgrade.', dir=repo.path)
1075 backuppath = None
1074 backuppath = None
1076 try:
1075 try:
1077 ui.write(_('creating temporary repository to stage migrated '
1076 ui.write(_('creating temporary repository to stage migrated '
1078 'data: %s\n') % tmppath)
1077 'data: %s\n') % tmppath)
1079 dstrepo = localrepo.localrepository(repo.baseui,
1078 dstrepo = localrepo.localrepository(repo.baseui,
1080 path=tmppath,
1079 path=tmppath,
1081 create=True)
1080 create=True)
1082
1081
1083 with dstrepo.wlock():
1082 with dstrepo.wlock():
1084 with dstrepo.lock():
1083 with dstrepo.lock():
1085 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1084 backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
1086 actions)
1085 actions)
1087
1086
1088 finally:
1087 finally:
1089 ui.write(_('removing temporary repository %s\n') % tmppath)
1088 ui.write(_('removing temporary repository %s\n') % tmppath)
1090 repo.vfs.rmtree(tmppath, forcibly=True)
1089 repo.vfs.rmtree(tmppath, forcibly=True)
1091
1090
1092 if backuppath:
1091 if backuppath:
1093 ui.warn(_('copy of old repository backed up at %s\n') %
1092 ui.warn(_('copy of old repository backed up at %s\n') %
1094 backuppath)
1093 backuppath)
1095 ui.warn(_('the old repository will not be deleted; remove '
1094 ui.warn(_('the old repository will not be deleted; remove '
1096 'it to free up disk space once the upgraded '
1095 'it to free up disk space once the upgraded '
1097 'repository is verified\n'))
1096 'repository is verified\n'))
@@ -1,179 +1,178 b''
1
1
2 $ cat << EOF > buggylocking.py
2 $ cat << EOF > buggylocking.py
3 > """A small extension that tests our developer warnings
3 > """A small extension that tests our developer warnings
4 > """
4 > """
5 >
5 >
6 > from mercurial import cmdutil, repair, revset
6 > from mercurial import cmdutil, repair, revset
7 >
7 >
8 > cmdtable = {}
8 > cmdtable = {}
9 > command = cmdutil.command(cmdtable)
9 > command = cmdutil.command(cmdtable)
10 >
10 >
11 > @command('buggylocking', [], '')
11 > @command('buggylocking', [], '')
12 > def buggylocking(ui, repo):
12 > def buggylocking(ui, repo):
13 > lo = repo.lock()
13 > lo = repo.lock()
14 > wl = repo.wlock()
14 > wl = repo.wlock()
15 > wl.release()
15 > wl.release()
16 > lo.release()
16 > lo.release()
17 >
17 >
18 > @command('buggytransaction', [], '')
18 > @command('buggytransaction', [], '')
19 > def buggylocking(ui, repo):
19 > def buggylocking(ui, repo):
20 > tr = repo.transaction('buggy')
20 > tr = repo.transaction('buggy')
21 > # make sure we rollback the transaction as we don't want to rely on the__del__
21 > # make sure we rollback the transaction as we don't want to rely on the__del__
22 > tr.release()
22 > tr.release()
23 >
23 >
24 > @command('properlocking', [], '')
24 > @command('properlocking', [], '')
25 > def properlocking(ui, repo):
25 > def properlocking(ui, repo):
26 > """check that reentrance is fine"""
26 > """check that reentrance is fine"""
27 > wl = repo.wlock()
27 > wl = repo.wlock()
28 > lo = repo.lock()
28 > lo = repo.lock()
29 > tr = repo.transaction('proper')
29 > tr = repo.transaction('proper')
30 > tr2 = repo.transaction('proper')
30 > tr2 = repo.transaction('proper')
31 > lo2 = repo.lock()
31 > lo2 = repo.lock()
32 > wl2 = repo.wlock()
32 > wl2 = repo.wlock()
33 > wl2.release()
33 > wl2.release()
34 > lo2.release()
34 > lo2.release()
35 > tr2.close()
35 > tr2.close()
36 > tr.close()
36 > tr.close()
37 > lo.release()
37 > lo.release()
38 > wl.release()
38 > wl.release()
39 >
39 >
40 > @command('nowaitlocking', [], '')
40 > @command('nowaitlocking', [], '')
41 > def nowaitlocking(ui, repo):
41 > def nowaitlocking(ui, repo):
42 > lo = repo.lock()
42 > lo = repo.lock()
43 > wl = repo.wlock(wait=False)
43 > wl = repo.wlock(wait=False)
44 > wl.release()
44 > wl.release()
45 > lo.release()
45 > lo.release()
46 >
46 >
47 > @command('stripintr', [], '')
47 > @command('stripintr', [], '')
48 > def stripintr(ui, repo):
48 > def stripintr(ui, repo):
49 > lo = repo.lock()
49 > lo = repo.lock()
50 > tr = repo.transaction('foobar')
50 > tr = repo.transaction('foobar')
51 > try:
51 > try:
52 > repair.strip(repo.ui, repo, [repo['.'].node()])
52 > repair.strip(repo.ui, repo, [repo['.'].node()])
53 > finally:
53 > finally:
54 > lo.release()
54 > lo.release()
55 > @command('oldanddeprecated', [], '')
55 > @command('oldanddeprecated', [], '')
56 > def oldanddeprecated(ui, repo):
56 > def oldanddeprecated(ui, repo):
57 > """test deprecation warning API"""
57 > """test deprecation warning API"""
58 > def foobar(ui):
58 > def foobar(ui):
59 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
59 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
60 > foobar(ui)
60 > foobar(ui)
61 >
61 >
62 > def oldstylerevset(repo, subset, x):
62 > def oldstylerevset(repo, subset, x):
63 > return list(subset)
63 > return list(subset)
64 >
64 >
65 > revset.symbols['oldstyle'] = oldstylerevset
65 > revset.symbols['oldstyle'] = oldstylerevset
66 > EOF
66 > EOF
67
67
68 $ cat << EOF >> $HGRCPATH
68 $ cat << EOF >> $HGRCPATH
69 > [extensions]
69 > [extensions]
70 > buggylocking=$TESTTMP/buggylocking.py
70 > buggylocking=$TESTTMP/buggylocking.py
71 > mock=$TESTDIR/mockblackbox.py
71 > mock=$TESTDIR/mockblackbox.py
72 > blackbox=
72 > blackbox=
73 > [devel]
73 > [devel]
74 > all-warnings=1
74 > all-warnings=1
75 > EOF
75 > EOF
76
76
77 $ hg init lock-checker
77 $ hg init lock-checker
78 $ cd lock-checker
78 $ cd lock-checker
79 $ hg buggylocking
79 $ hg buggylocking
80 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
80 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
81 $ cat << EOF >> $HGRCPATH
81 $ cat << EOF >> $HGRCPATH
82 > [devel]
82 > [devel]
83 > all=0
83 > all=0
84 > check-locks=1
84 > check-locks=1
85 > EOF
85 > EOF
86 $ hg buggylocking
86 $ hg buggylocking
87 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
87 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
88 $ hg buggylocking --traceback
88 $ hg buggylocking --traceback
89 devel-warn: "wlock" acquired after "lock" at:
89 devel-warn: "wlock" acquired after "lock" at:
90 */hg:* in * (glob)
90 */hg:* in * (glob)
91 */mercurial/dispatch.py:* in run (glob)
91 */mercurial/dispatch.py:* in run (glob)
92 */mercurial/dispatch.py:* in dispatch (glob)
92 */mercurial/dispatch.py:* in dispatch (glob)
93 */mercurial/dispatch.py:* in _runcatch (glob)
93 */mercurial/dispatch.py:* in _runcatch (glob)
94 */mercurial/dispatch.py:* in callcatch (glob)
94 */mercurial/dispatch.py:* in callcatch (glob)
95 */mercurial/scmutil.py* in callcatch (glob)
95 */mercurial/scmutil.py* in callcatch (glob)
96 */mercurial/dispatch.py:* in _runcatchfunc (glob)
96 */mercurial/dispatch.py:* in _runcatchfunc (glob)
97 */mercurial/dispatch.py:* in _dispatch (glob)
97 */mercurial/dispatch.py:* in _dispatch (glob)
98 */mercurial/dispatch.py:* in runcommand (glob)
98 */mercurial/dispatch.py:* in runcommand (glob)
99 */mercurial/dispatch.py:* in _runcommand (glob)
99 */mercurial/dispatch.py:* in _runcommand (glob)
100 */mercurial/dispatch.py:* in <lambda> (glob)
100 */mercurial/dispatch.py:* in <lambda> (glob)
101 */mercurial/util.py:* in check (glob)
101 */mercurial/util.py:* in check (glob)
102 $TESTTMP/buggylocking.py:* in buggylocking (glob)
102 $TESTTMP/buggylocking.py:* in buggylocking (glob)
103 $ hg properlocking
103 $ hg properlocking
104 $ hg nowaitlocking
104 $ hg nowaitlocking
105
105
106 $ echo a > a
106 $ echo a > a
107 $ hg add a
107 $ hg add a
108 $ hg commit -m a
108 $ hg commit -m a
109 $ hg stripintr
109 $ hg stripintr 2>&1 | egrep -v '^(\*\*| )'
110 saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
110 saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
111 abort: programming error: cannot strip from inside a transaction
111 Traceback (most recent call last):
112 (contact your extension maintainer)
112 mercurial.error.ProgrammingError: cannot strip from inside a transaction
113 [255]
114
113
115 $ hg log -r "oldstyle()" -T '{rev}\n'
114 $ hg log -r "oldstyle()" -T '{rev}\n'
116 devel-warn: revset "oldstyle" uses list instead of smartset
115 devel-warn: revset "oldstyle" uses list instead of smartset
117 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
116 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
118 0
117 0
119 $ hg oldanddeprecated
118 $ hg oldanddeprecated
120 devel-warn: foorbar is deprecated, go shopping
119 devel-warn: foorbar is deprecated, go shopping
121 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
120 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
122
121
123 $ hg oldanddeprecated --traceback
122 $ hg oldanddeprecated --traceback
124 devel-warn: foorbar is deprecated, go shopping
123 devel-warn: foorbar is deprecated, go shopping
125 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
124 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
126 */hg:* in <module> (glob)
125 */hg:* in <module> (glob)
127 */mercurial/dispatch.py:* in run (glob)
126 */mercurial/dispatch.py:* in run (glob)
128 */mercurial/dispatch.py:* in dispatch (glob)
127 */mercurial/dispatch.py:* in dispatch (glob)
129 */mercurial/dispatch.py:* in _runcatch (glob)
128 */mercurial/dispatch.py:* in _runcatch (glob)
130 */mercurial/dispatch.py:* in callcatch (glob)
129 */mercurial/dispatch.py:* in callcatch (glob)
131 */mercurial/scmutil.py* in callcatch (glob)
130 */mercurial/scmutil.py* in callcatch (glob)
132 */mercurial/dispatch.py:* in _runcatchfunc (glob)
131 */mercurial/dispatch.py:* in _runcatchfunc (glob)
133 */mercurial/dispatch.py:* in _dispatch (glob)
132 */mercurial/dispatch.py:* in _dispatch (glob)
134 */mercurial/dispatch.py:* in runcommand (glob)
133 */mercurial/dispatch.py:* in runcommand (glob)
135 */mercurial/dispatch.py:* in _runcommand (glob)
134 */mercurial/dispatch.py:* in _runcommand (glob)
136 */mercurial/dispatch.py:* in <lambda> (glob)
135 */mercurial/dispatch.py:* in <lambda> (glob)
137 */mercurial/util.py:* in check (glob)
136 */mercurial/util.py:* in check (glob)
138 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
137 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
139 $ hg blackbox -l 9
138 $ hg blackbox -l 9
140 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" uses list instead of smartset
139 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" uses list instead of smartset
141 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
140 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
142 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r 'oldstyle()' -T '{rev}\n' exited 0 after * seconds (glob)
141 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r 'oldstyle()' -T '{rev}\n' exited 0 after * seconds (glob)
143 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
142 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
144 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
143 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
145 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
144 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
146 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
145 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
147 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
146 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
148 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
147 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
149 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
148 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
150 */hg:* in <module> (glob)
149 */hg:* in <module> (glob)
151 */mercurial/dispatch.py:* in run (glob)
150 */mercurial/dispatch.py:* in run (glob)
152 */mercurial/dispatch.py:* in dispatch (glob)
151 */mercurial/dispatch.py:* in dispatch (glob)
153 */mercurial/dispatch.py:* in _runcatch (glob)
152 */mercurial/dispatch.py:* in _runcatch (glob)
154 */mercurial/dispatch.py:* in callcatch (glob)
153 */mercurial/dispatch.py:* in callcatch (glob)
155 */mercurial/scmutil.py* in callcatch (glob)
154 */mercurial/scmutil.py* in callcatch (glob)
156 */mercurial/dispatch.py:* in _runcatchfunc (glob)
155 */mercurial/dispatch.py:* in _runcatchfunc (glob)
157 */mercurial/dispatch.py:* in _dispatch (glob)
156 */mercurial/dispatch.py:* in _dispatch (glob)
158 */mercurial/dispatch.py:* in runcommand (glob)
157 */mercurial/dispatch.py:* in runcommand (glob)
159 */mercurial/dispatch.py:* in _runcommand (glob)
158 */mercurial/dispatch.py:* in _runcommand (glob)
160 */mercurial/dispatch.py:* in <lambda> (glob)
159 */mercurial/dispatch.py:* in <lambda> (glob)
161 */mercurial/util.py:* in check (glob)
160 */mercurial/util.py:* in check (glob)
162 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
161 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
163 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
162 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
164 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9
163 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9
165
164
166 Test programming error failure:
165 Test programming error failure:
167
166
168 $ hg buggytransaction 2>&1 | egrep -v '^ '
167 $ hg buggytransaction 2>&1 | egrep -v '^ '
169 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
168 ** Unknown exception encountered with possibly-broken third-party extension buggylocking
170 ** which supports versions unknown of Mercurial.
169 ** which supports versions unknown of Mercurial.
171 ** Please disable buggylocking and try your action again.
170 ** Please disable buggylocking and try your action again.
172 ** If that fixes the bug please report it to the extension author.
171 ** If that fixes the bug please report it to the extension author.
173 ** Python * (glob)
172 ** Python * (glob)
174 ** Mercurial Distributed SCM (*) (glob)
173 ** Mercurial Distributed SCM (*) (glob)
175 ** Extensions loaded: * (glob)
174 ** Extensions loaded: * (glob)
176 Traceback (most recent call last):
175 Traceback (most recent call last):
177 mercurial.error.ProgrammingError: transaction requires locking
176 mercurial.error.ProgrammingError: transaction requires locking
178
177
179 $ cd ..
178 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now