##// END OF EJS Templates
with: use context manager in rebuildfncache again
Bryan O'Sullivan -
r27860:0da102e4 default
parent child Browse files
Show More
@@ -1,321 +1,318 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import short
14 from .node import short
15 from . import (
15 from . import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 error,
18 error,
19 exchange,
19 exchange,
20 util,
20 util,
21 )
21 )
22
22
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Create a backup bundle containing the specified revisions.

    The bundle is written under ``.hg/strip-backup/`` and its name embeds
    the short hash of ``node``, a hash of all bundled nodes (for
    uniqueness), and ``suffix``.

    repo: the (unfiltered) repository to read from
    bases/heads: node lists delimiting the set of revisions to bundle
    node: node whose short hash names the backup file
    suffix: trailing component of the backup file name (e.g. 'backup')
    compress: when False, write an uncompressed bundle (used for the
        temporary partial bundle that is deleted right after strip)

    Returns the vfs-relative path of the written bundle.
    """
    # Use changegroup format 02 only when the repo requires generaldelta;
    # otherwise stay with the widely-compatible 01 format.
    cgversion = '01'
    if 'generaldelta' in repo.requirements:
        cgversion = '02'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = util.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # cg02 payloads travel in an HG20 envelope (optionally BZ-compressed);
    # cg01 uses the legacy HG10BZ/HG10UN bundle types.
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                   compression=comp)
54 def _collectfiles(repo, striprev):
54 def _collectfiles(repo, striprev):
55 """find out the filelogs affected by the strip"""
55 """find out the filelogs affected by the strip"""
56 files = set()
56 files = set()
57
57
58 for x in xrange(striprev, len(repo)):
58 for x in xrange(striprev, len(repo)):
59 files.update(repo[x].files())
59 files.update(repo[x].files())
60
60
61 return sorted(files)
61 return sorted(files)
62
62
63 def _collectbrokencsets(repo, files, striprev):
63 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
64 """return the changesets which will be broken by the truncation"""
65 s = set()
65 s = set()
66 def collectone(revlog):
66 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
67 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
68 s.update([revlog.linkrev(r) for r in brokenset])
69
69
70 collectone(repo.manifest)
70 collectone(repo.manifest)
71 for fname in files:
71 for fname in files:
72 collectone(repo.file(fname))
72 collectone(repo.file(fname))
73
73
74 return s
74 return s
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the given nodes (and all their descendants) from the repo.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.

    nodelist: a node or list of nodes to strip
    backup: write a backup bundle first; the legacy string values
        'none'/'strip' also disable the backup
    topic: suffix used in the backup bundle's file name
    """
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in range(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set must be moved to
    # newbmtarget afterwards.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # Truncate the revlogs that the transaction touched back to
            # their pre-strip offsets; a file truncated to 0 no longer
            # exists as far as the store is concerned.
            for i in range(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        finally:
            tr.release()

        if saveheads or savebases:
            # Re-apply the temporary bundle of revisions that were above
            # striprev but must survive the strip.
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(chgrpfile))
                    tr.close()
                finally:
                    tr.release()
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    # Context manager releases the lock on every exit path (this is the
    # post-r27860 form that replaced the manual lock/try/finally dance).
    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('changeset'), rev, total=repolen)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('changeset'), None)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # Persist the rebuilt cache inside its own transaction.
            tr = repo.transaction('fncache')
            try:
                fnc.write(tr)
                tr.close()
            finally:
                tr.release()
        else:
            ui.write(_('fncache already up to date\n'))
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.

    Selects the ancestors of the bookmark that are reachable neither from
    any non-bookmarked head nor from any other bookmark.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now