repair: determine what upgrade will do...
Gregory Szorc
r30776:3997edc4 default
@@ -1,495 +1,773 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    error,
    exchange,
    obsolete,
    util,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

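# Illustrative sketch, not part of this change: how the backup file name
# built by _bundle() above comes together. The node and changeset hashes
# here are made up.
def _demobackupname():
    allhashes = sorted(['aa' * 20, 'bb' * 20])  # hex hashes of bundled csets
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    # yields e.g. "strip-backup/aaaaaaaaaaaa-<8 hex digits>-backup.hg"
    return "strip-backup/%s-%s-%s.hg" % ('aa' * 6, totalhash[:8], 'backup')
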
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifestlog._revlog)
    for fname in files:
        collectone(repo.file(fname))

    return s

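# Hedged illustration, not part of this change: the "broken" changesets are
# those whose filelog or manifest revisions sit beyond the strip point even
# though the changesets themselves are not being stripped; strip() below
# bundles them so they can be re-applied after truncation. `repo` and
# `striprev` are assumed to come from a caller.
def _demobrokencsets(repo, striprev):
    files = _collectfiles(repo, striprev)
    return _collectbrokencsets(repo, files, striprev)
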
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use changegroup.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr  # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile

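# Hedged usage sketch, not part of this change: how an extension command
# might invoke strip(). The revision name is hypothetical.
def _demostrip(ui, repo):
    nodes = [repo['feature-head'].node()]  # hypothetical revision name
    backupfile = strip(ui, repo, nodes, backup=True, topic='backup')
    # strip() returns the vfs-relative backup bundle path, or None when
    # backup was disabled
    return backupfile
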
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

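# Hedged sketch, not part of this change: the store paths rebuildfncache()
# probes for each tracked file; only paths that exist end up in the fncache.
def _demofncachepaths(f):
    return ['data/%s.i' % f,  # revlog index, present whenever f has history
            'data/%s.d' % f]  # revlog data, present only for larger revlogs
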
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

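# Hedged usage sketch, not part of this change: resolving the revisions that
# 'hg strip -B <mark>' would remove. The bookmark name is hypothetical.
def _demostripbookmark(repo):
    revs = stripbmrevset(repo, 'feature')
    return [repo[r].node() for r in revs]
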
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want removed. A new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n

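# Hedged usage sketch, not part of this change: dropping two markers by
# index. The indices are hypothetical; the obsstore file is rewritten
# atomically as described in the docstring above.
def _demodeleteobsmarkers(repo):
    return deleteobsmarkers(repo.obsstore, [0, 1])
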
def upgraderequiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    return set([
        # Introduced in Mercurial 0.9.2.
        'revlogv1',
        # Introduced in Mercurial 0.9.2.
        'store',
    ])

def upgradeblocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    return set([
        # The upgrade code does not yet support these experimental features.
        # This is an artificial limitation.
        'manifestv2',
        'treemanifest',
        # This was a precursor to generaldelta and was never enabled by default.
        # It should (hopefully) not exist in the wild.
        'parentdelta',
        # Upgrade should operate on the actual store, not the shared link.
        'shared',
    ])

def upgradesupportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    return set()

def upgradesupporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would create requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
        'revlogv1',
        'store',
    ])

def upgradeallowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    return set([
        'dotencode',
        'fncache',
        'generaldelta',
    ])

deficiency = 'deficiency'
optimisation = 'optimization'

class upgradeimprovement(object):
    """Represents an improvement that can be made as part of an upgrade.

    The following attributes are defined on each instance:

    name
       Machine-readable string uniquely identifying this improvement. It
       will be mapped to an action later in the upgrade process.

    type
       Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
       problem. An optimization is an action (sometimes optional) that
       can be taken to further improve the state of the repository.

    description
       Message intended for humans explaining the improvement in more detail,
       including the implications of it. For ``deficiency`` types, should be
       worded in the present tense. For ``optimisation`` types, should be
       worded in the future tense.

    upgrademessage
       Message intended for humans explaining what an upgrade addressing this
       issue will do. Should be worded in the future tense.

    fromdefault (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from Mercurial's default configuration.

    fromconfig (``deficiency`` types only)
       Boolean indicating whether the current (deficient) state deviates
       from the current Mercurial configuration.
    """
    def __init__(self, name, type, description, upgrademessage, **kwargs):
        self.name = name
        self.type = type
        self.description = description
        self.upgrademessage = upgrademessage

        for k, v in kwargs.items():
            setattr(self, k, v)

def upgradefindimprovements(repo):
    """Determine improvements that can be made to the repo during upgrade.

    Returns a list of ``upgradeimprovement`` describing repository deficiencies
    and optimizations.
    """
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    newreporeqs = localrepo.newreporequirements(repo)

    improvements = []

    # We could detect lack of revlogv1 and store here, but they were added
    # in 0.9.2 and we don't support upgrading repos without these
    # requirements, so let's not bother.

    if 'fncache' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='fncache',
            type=deficiency,
            description=_('long and reserved filenames may not work correctly; '
                          'repository performance is sub-optimal'),
            upgrademessage=_('repository will be more resilient to storing '
                             'certain paths and performance of certain '
                             'operations should be improved'),
            fromdefault=True,
            fromconfig='fncache' in newreporeqs))

    if 'dotencode' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='dotencode',
            type=deficiency,
            description=_('storage of filenames beginning with a period or '
                          'space may not work correctly'),
            upgrademessage=_('repository will be better able to store files '
                             'beginning with a space or period'),
            fromdefault=True,
            fromconfig='dotencode' in newreporeqs))

    if 'generaldelta' not in repo.requirements:
        improvements.append(upgradeimprovement(
            name='generaldelta',
            type=deficiency,
            description=_('deltas within internal storage are unable to '
                          'choose optimal revisions; repository is larger and '
                          'slower than it could be; interaction with other '
                          'repositories may require extra network and CPU '
                          'resources, making "hg push" and "hg pull" slower'),
            upgrademessage=_('repository storage will be able to create '
                             'optimal deltas; new repository data will be '
                             'smaller and read times should decrease; '
                             'interacting with other repositories using this '
                             'storage model should require less network and '
                             'CPU resources, making "hg push" and "hg pull" '
                             'faster'),
            fromdefault=True,
            fromconfig='generaldelta' in newreporeqs))

    # Mercurial 4.0 changed changelogs to not use delta chains. Search for
    # changelogs with deltas.
    cl = repo.changelog
    for rev in cl:
        chainbase = cl.chainbase(rev)
        if chainbase != rev:
            improvements.append(upgradeimprovement(
                name='removecldeltachain',
                type=deficiency,
                description=_('changelog storage is using deltas instead of '
                              'raw entries; changelog reading and any '
                              'operation relying on changelog data are slower '
                              'than they could be'),
                upgrademessage=_('changelog storage will be reformatted to '
                                 'store raw entries; changelog reading will be '
                                 'faster; changelog size may be reduced'),
                fromdefault=True,
                fromconfig=True))
            break

    # Now for the optimizations.

    # These are unconditionally added. There is logic later that figures out
    # which ones to apply.

    improvements.append(upgradeimprovement(
        name='redeltaparent',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated to '
                      'choose an optimal base revision where this was not '
                      'already done; the size of the repository may shrink and '
                      'various operations may become faster; the first time '
                      'this optimization is performed could slow down upgrade '
                      'execution considerably; subsequent invocations should '
                      'not run noticeably slower'),
        upgrademessage=_('deltas within internal storage will choose a new '
                         'base revision if needed')))

    improvements.append(upgradeimprovement(
        name='redeltamultibase',
        type=optimisation,
        description=_('deltas within internal storage will be recalculated '
                      'against multiple base revisions and the smallest '
                      'difference will be used; the size of the repository may '
                      'shrink significantly when there are many merges; this '
                      'optimization will slow down execution in proportion to '
                      'the number of merges in the repository and the number '
                      'of files in the repository; this slowdown should not '
                      'be significant unless there are tens of thousands of '
                      'files and thousands of merges'),
        upgrademessage=_('deltas within internal storage will choose an '
                         'optimal delta by computing deltas against multiple '
                         'parents; may slow down execution time '
                         'significantly')))

    improvements.append(upgradeimprovement(
        name='redeltaall',
        type=optimisation,
        description=_('deltas within internal storage will always be '
                      'recalculated without reusing prior deltas; this will '
                      'likely make execution run several times slower; this '
                      'optimization is typically not needed'),
        upgrademessage=_('deltas within internal storage will be fully '
                         'recomputed; this will likely drastically slow down '
                         'execution time')))

    return improvements

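# Hedged usage sketch, not part of this change: reporting every detected
# improvement; `ui` and `repo` are assumed to come from a command function.
def _demoprintimprovements(ui, repo):
    for i in upgradefindimprovements(repo):
        ui.write('%s (%s)\n %s\n\n' % (i.name, i.type, i.description))
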
def upgradedetermineactions(repo, improvements, sourcereqs, destreqs,
                            optimize):
    """Determine upgrade actions that will be performed.

    Given a list of improvements as returned by ``upgradefindimprovements``,
    determine the list of upgrade actions that will be performed.

    The role of this function is to filter improvements if needed, apply
    recommended optimizations from the improvements list that make sense,
    etc.

    Returns a list of action names.
    """
    newactions = []

    knownreqs = upgradesupporteddestrequirements(repo)

    for i in improvements:
        name = i.name

        # If the action is a requirement that doesn't show up in the
        # destination requirements, prune the action.
        if name in knownreqs and name not in destreqs:
            continue

        if i.type == deficiency:
            newactions.append(name)

    newactions.extend(o for o in sorted(optimize) if o not in newactions)

    # FUTURE consider adding some optimizations here for certain transitions.
    # e.g. adding generaldelta could schedule parent redeltas.

    return newactions

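# Hedged sketch, not part of this change: computing the action list the way
# upgraderepo() below does, with no extra optimizations requested.
def _demoactions(repo):
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo
    improvements = upgradefindimprovements(repo)
    newreqs = localrepo.newreporequirements(repo)
    return upgradedetermineactions(repo, improvements, repo.requirements,
                                   newreqs, set())
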
def upgraderepo(ui, repo, run=False, optimize=None):
    """Upgrade a repository in place."""
    # Avoid cycle: cmdutil -> repair -> localrepo -> cmdutil
    from . import localrepo

    optimize = set(optimize or [])
    repo = repo.unfiltered()

    # Ensure the repository can be upgraded.
    missingreqs = upgraderequiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(_('cannot upgrade repository; requirement '
                            'missing: %s') % _(', ').join(sorted(missingreqs)))

    blockedreqs = upgradeblocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(_('cannot upgrade repository; unsupported source '
                            'requirement: %s') %
                          _(', ').join(sorted(blockedreqs)))

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(repo)

    noremovereqs = (repo.requirements - newreqs -
                    upgradesupportremovedrequirements(repo))
    if noremovereqs:
        raise error.Abort(_('cannot upgrade repository; requirement would be '
                            'removed: %s') % _(', ').join(sorted(noremovereqs)))

    noaddreqs = (newreqs - repo.requirements -
                 upgradeallowednewrequirements(repo))
    if noaddreqs:
        raise error.Abort(_('cannot upgrade repository; do not support adding '
                            'requirement: %s') %
                          _(', ').join(sorted(noaddreqs)))

    unsupportedreqs = newreqs - upgradesupporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(_('cannot upgrade repository; do not support '
                            'destination requirement: %s') %
                          _(', ').join(sorted(unsupportedreqs)))

    # Find and validate all improvements that can be made.
    improvements = upgradefindimprovements(repo)
    for i in improvements:
        if i.type not in (deficiency, optimisation):
            raise error.Abort(_('unexpected improvement type %s for %s') % (
                i.type, i.name))

    # Validate arguments.
    unknownoptimize = optimize - set(i.name for i in improvements
                                     if i.type == optimisation)
    if unknownoptimize:
        raise error.Abort(_('unknown optimization action requested: %s') %
                          ', '.join(sorted(unknownoptimize)),
                          hint=_('run without arguments to see valid '
                                 'optimizations'))

    actions = upgradedetermineactions(repo, improvements, repo.requirements,
                                      newreqs, optimize)

    def printrequirements():
        ui.write(_('requirements\n'))
        ui.write(_(' preserved: %s\n') %
                 _(', ').join(sorted(newreqs & repo.requirements)))

        if repo.requirements - newreqs:
            ui.write(_(' removed: %s\n') %
                     _(', ').join(sorted(repo.requirements - newreqs)))

        if newreqs - repo.requirements:
            ui.write(_(' added: %s\n') %
                     _(', ').join(sorted(newreqs - repo.requirements)))

        ui.write('\n')

    def printupgradeactions():
        for action in actions:
            for i in improvements:
                if i.name == action:
                    ui.write('%s\n %s\n\n' %
                             (i.name, i.upgrademessage))

    if not run:
        fromdefault = []
        fromconfig = []
        optimizations = []

        for i in improvements:
            assert i.type in (deficiency, optimisation)
            if i.type == deficiency:
                if i.fromdefault:
                    fromdefault.append(i)
                if i.fromconfig:
                    fromconfig.append(i)
            else:
                optimizations.append(i)

        if fromdefault or fromconfig:
            fromconfignames = set(x.name for x in fromconfig)
            onlydefault = [i for i in fromdefault
                           if i.name not in fromconfignames]

            if fromconfig:
                ui.write(_('repository lacks features recommended by '
                           'current config options:\n\n'))
                for i in fromconfig:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.write(_('repository lacks features used by the default '
                           'config options:\n\n'))
                for i in onlydefault:
                    ui.write('%s\n %s\n\n' % (i.name, i.description))

            ui.write('\n')
        else:
            ui.write(_('(no feature deficiencies found in existing '
                       'repository)\n'))

        ui.write(_('performing an upgrade with "--run" will make the following '
                   'changes:\n\n'))

        printrequirements()
        printupgradeactions()

        unusedoptimize = [i for i in improvements
                          if i.name not in actions and i.type == optimisation]
        if unusedoptimize:
            ui.write(_('additional optimizations are available by specifying '
                       '"--optimize <name>":\n\n'))
            for i in unusedoptimize:
                ui.write(_('%s\n %s\n\n') % (i.name, i.description))
@@ -1,51 +1,182 @@
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > share =
  > EOF

store and revlogv1 are required in source

  $ hg --config format.usestore=false init no-store
  $ hg -R no-store debugupgraderepo
  abort: cannot upgrade repository; requirement missing: store
  [255]

  $ hg init no-revlogv1
  $ cat > no-revlogv1/.hg/requires << EOF
  > dotencode
  > fncache
  > generaldelta
  > store
  > EOF

  $ hg -R no-revlogv1 debugupgraderepo
  abort: cannot upgrade repository; requirement missing: revlogv1
  [255]

Cannot upgrade shared repositories

  $ hg init share-parent
  $ hg -q share share-parent share-child

  $ hg -R share-child debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

Do not yet support upgrading manifestv2 and treemanifest repos

  $ hg --config experimental.manifestv2=true init manifestv2
  $ hg -R manifestv2 debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: manifestv2
  [255]

  $ hg --config experimental.treemanifest=true init treemanifest
  $ hg -R treemanifest debugupgraderepo
  abort: cannot upgrade repository; unsupported source requirement: treemanifest
  [255]

Cannot add manifestv2 or treemanifest requirement during upgrade

  $ hg init disallowaddedreq
  $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
  abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
  [255]

An upgrade of a repository created with recommended settings only suggests optimizations

  $ hg init empty
  $ cd empty
  $ hg debugupgraderepo
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
   preserved: dotencode, fncache, generaldelta, revlogv1, store

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
   deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
   deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
   deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


--optimize can be used to add optimizations

  $ hg debugupgraderepo --optimize redeltaparent
  (no feature deficiencies found in existing repository)
  performing an upgrade with "--run" will make the following changes:

  requirements
   preserved: dotencode, fncache, generaldelta, revlogv1, store

  redeltaparent
   deltas within internal storage will choose a new base revision if needed

  additional optimizations are available by specifying "--optimize <name>":

  redeltamultibase
   deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
   deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


Various sub-optimal detections work

  $ cat > .hg/requires << EOF
  > revlogv1
  > store
  > EOF

  $ hg debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
   long and reserved filenames may not work correctly; repository performance is sub-optimal

  dotencode
   storage of filenames beginning with a period or space may not work correctly

  generaldelta
   deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower


  performing an upgrade with "--run" will make the following changes:

  requirements
   preserved: revlogv1, store
   added: dotencode, fncache, generaldelta

  fncache
   repository will be more resilient to storing certain paths and performance of certain operations should be improved

  dotencode
   repository will be better able to store files beginning with a space or period

  generaldelta
   repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
   deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
   deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
   deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed


  $ hg --config format.dotencode=false debugupgraderepo
  repository lacks features recommended by current config options:

  fncache
   long and reserved filenames may not work correctly; repository performance is sub-optimal

  generaldelta
   deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower

  repository lacks features used by the default config options:

  dotencode
   storage of filenames beginning with a period or space may not work correctly


  performing an upgrade with "--run" will make the following changes:

  requirements
   preserved: revlogv1, store
   added: fncache, generaldelta

  fncache
   repository will be more resilient to storing certain paths and performance of certain operations should be improved

  generaldelta
   repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster

  additional optimizations are available by specifying "--optimize <name>":

  redeltaparent
   deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower

  redeltamultibase
   deltas within internal storage will be recalculated against multiple base revisions and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the number of files in the repository; this slowdown should not be significant unless there are tens of thousands of files and thousands of merges

  redeltaall
   deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
