##// END OF EJS Templates
with: use context manager for transaction in strip
Bryan O'Sullivan -
r27875:add2ba16 default
parent child Browse files
Show More
@@ -1,310 +1,306
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import short
14 from .node import short
15 from . import (
15 from . import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 error,
18 error,
19 exchange,
19 exchange,
20 util,
20 util,
21 )
21 )
22
22
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Write a backup bundle of the revisions between bases and heads.

    The bundle is stored under the repository's 'strip-backup' directory
    and its name embeds the short node, a hash of all bundled nodes (for
    uniqueness) and *suffix*.  Returns the bundle file name relative to
    repo.vfs.
    """
    # Changegroup version 02 is required for generaldelta repositories.
    version = '02' if 'generaldelta' in repo.requirements else '01'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=version)

    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    commits = repo.set('%ln::%ln', bases, heads)
    digest = util.sha1(''.join(sorted(c.hex() for c in commits))).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    compression = None
    if version == '01':
        # Old-style single-changegroup bundles: compression is baked
        # into the bundle type.
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            compression = 'BZ'
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                   compression=compression)
53
53
def _collectfiles(repo, striprev):
    """Return a sorted list of files touched by revisions >= striprev."""
    touched = set()
    rev = striprev
    end = len(repo)
    while rev < end:
        touched.update(repo[rev].files())
        rev += 1
    return sorted(touched)
62
62
def _collectbrokencsets(repo, files, striprev):
    """Return the set of changeset revs broken by truncating at striprev."""
    broken = set()

    def _addbroken(revlog):
        # getstrippoint reports, for this revlog, the revisions whose
        # linkrevs would dangle after the strip.
        brokenset = revlog.getstrippoint(striprev)[1]
        broken.update(revlog.linkrev(r) for r in brokenset)

    _addbroken(repo.manifest)
    for fname in files:
        _addbroken(repo.file(fname))

    return broken
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the revisions in nodelist, plus all their descendants, from repo.

    ui: ui object used for progress notes and warnings.
    repo: repository to strip (unfiltered internally).
    nodelist: a node or list of nodes whose revisions are removed.
    backup: when true, a full backup bundle is written under the
        'strip-backup' directory before truncation; the legacy string
        values 'none' and 'strip' are treated as False.
    topic: suffix used in the backup bundle's file name.

    Revisions above the strip point that are NOT descendants of the
    stripped set are saved to a temporary bundle and re-applied after
    truncation.  Bookmarks on stripped revisions are moved to the
    nearest surviving ancestor.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            # r survives the strip: save it, and its parents stop being
            # heads of the saved set.
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        # no surviving parent: bookmarks fall back to the working parent
        newbmtarget = '.'

    # collect bookmarks that point into the stripped set
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    # strip must own its transaction; refuse to run inside another one
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            # only the revlog entries appended by *this* strip (from
            # `offset` onwards) are truncated below
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    # the revlog is now empty; drop it from the store map
                    repo.store.markremoved(file)

        if saveheads or savebases:
            # re-apply the revisions that were saved before truncation
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(chgrpfile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(chgrpfile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # move displaced bookmarks to the surviving target, under lock
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        # on any failure, tell the user which bundle still holds the
        # stripped (or saved) revisions before propagating the error
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
238
234
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset, probing the store for the ``.i``/``.d``
    revlog files of each touched path, and rewrites the fncache only
    when the computed entry set differs from the current one.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        # NOTE: fncache loads lazily; this throwaway membership test
        # forces it to read from disk before we snapshot its entries.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('changeset'), rev, total=repolen)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                # index and (optional) data file names for this filelog
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('changeset'), None)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache within its own transaction
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
299
295
def stripbmrevset(repo, mark):
    """Return the revset of revisions to strip for ``strip -B mark``.

    Selects the ancestors of the bookmark that are reachable neither
    from a non-bookmarked head nor from any other bookmark.

    Needs to live here so extensions can use it and wrap it even when
    strip is not enabled or not present on a box.
    """
    revset = ("ancestors(bookmark(%s)) - "
              "ancestors(head() and not bookmark(%s)) - "
              "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revset, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now