##// END OF EJS Templates
bundle2: fix type of experimental option
Matt Mackall -
r25845:7e3bb3e1 default
parent child Browse files
Show More
@@ -1,298 +1,298 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from mercurial import changegroup, exchange, util, bundle2
9 from mercurial import changegroup, exchange, util, bundle2
10 from mercurial.node import short
10 from mercurial.node import short
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 import errno
12 import errno
13
13
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Create a backup bundle containing the specified revisions.

    The bundle holds the changesets reachable as ``bases::heads`` and is
    written into the ``strip-backup`` directory of the repository's vfs.

    repo     - the (unfiltered) local repository
    bases    - list of base nodes of the set to bundle
    heads    - list of head nodes of the set to bundle
    node     - node whose short hash is used in the backup file name
    suffix   - file-name suffix (e.g. 'backup' or 'temp')
    compress - when False, write an uncompressed HG10UN bundle (used for
               temporary bundles that are deleted right after use)

    Returns the vfs-relative path of the written bundle file.
    """
    # bundle2-exp is a boolean option: it must be read with configbool(),
    # not config() — config() returns the raw string, which is truthy even
    # for values like '0' or 'false'.
    usebundle2 = (repo.ui.configbool('experimental', 'bundle2-exp', True) and
                  repo.ui.config('experimental', 'strip-bundle2-version'))
    if usebundle2:
        cgversion = repo.ui.config('experimental', 'strip-bundle2-version')
        if cgversion not in changegroup.packermap:
            # Unknown changegroup version requested: warn and fall back to
            # the classic '01' format without bundle2 framing.
            repo.ui.warn(_('unknown strip-bundle2-version value %r; '
                           'should be one of %r\n') %
                         (cgversion, sorted(changegroup.packermap.keys()),))
            cgversion = '01'
            usebundle2 = False
    else:
        cgversion = '01'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = util.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    if usebundle2:
        bundletype = "HG20"
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs)
49
49
def _collectfiles(repo, striprev):
    """Find out the filelogs affected by the strip.

    Scans every changeset from ``striprev`` to the repository tip and
    collects the files they touch; those are exactly the filelogs that
    truncation may break.

    Returns a sorted list of file names.
    """
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
58
58
59 def _collectbrokencsets(repo, files, striprev):
59 def _collectbrokencsets(repo, files, striprev):
60 """return the changesets which will be broken by the truncation"""
60 """return the changesets which will be broken by the truncation"""
61 s = set()
61 s = set()
62 def collectone(revlog):
62 def collectone(revlog):
63 _, brokenset = revlog.getstrippoint(striprev)
63 _, brokenset = revlog.getstrippoint(striprev)
64 s.update([revlog.linkrev(r) for r in brokenset])
64 s.update([revlog.linkrev(r) for r in brokenset])
65
65
66 collectone(repo.manifest)
66 collectone(repo.manifest)
67 for fname in files:
67 for fname in files:
68 collectone(repo.file(fname))
68 collectone(repo.file(fname))
69
69
70 return s
70 return s
71
71
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip ``nodelist`` and all their descendants from the repository.

    Revisions above the minimum stripped revision that are NOT descendants
    of the stripped set are saved to a temporary bundle and re-applied
    after truncation. When ``backup`` is true, a full backup bundle of
    everything removed is written to ``strip-backup`` first.

    Bookmarks pointing at stripped revisions are moved to the nearest
    surviving ancestor. Must not be called from inside a transaction.
    """

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise util.Abort(msg, hint=_('contact your extension maintainer'))

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    # the whole revlog file vanished; tell the store
                    repo.store.markremoved(file)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            # restore the revisions that were not part of the strip set
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
230
230
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        # Message fix: the opening '(' was never closed in the original.
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    lock = repo.lock()
    try:
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('changeset'), rev, total=repolen)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('changeset'), None)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            tr = repo.transaction('fncache')
            try:
                fnc.write(tr)
                tr.close()
            finally:
                tr.release()
        else:
            ui.write(_('fncache already up to date\n'))
    finally:
        lock.release()
298
298
General Comments 0
You need to be logged in to leave comments. Login now