##// END OF EJS Templates
repair: use 'rebuilding' progress topic in rebuildfncache()
av6 -
r28465:43eb31ea default
parent child Browse files
Show More
@@ -1,314 +1,314 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import short
14 from .node import short
15 from . import (
15 from . import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 error,
18 error,
19 exchange,
19 exchange,
20 util,
20 util,
21 )
21 )
22
22
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Create a backup bundle of the specified revisions.

    The bundle contains the changegroup for ``bases::heads`` and is written
    under ``.hg/strip-backup/``.  The filename embeds the short hash of
    ``node``, a digest of all bundled node hashes (for uniqueness), and
    ``suffix``.  Returns the vfs-relative path of the bundle file.

    If the repo supports a changegroup version newer than '01', an HG20
    (bundle2) container is used; otherwise a plain HG10 bundle is written.
    ``compress=False`` disables compression (used for temporary bundles
    that are deleted right after being re-applied).
    """
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = util.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                   compression=comp)
51
51
def _collectfiles(repo, striprev):
    """Return the sorted list of files touched by revisions >= striprev.

    These are the filelogs that will be affected by the strip.
    """
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
60
60
def _collectbrokencsets(repo, files, striprev):
    """Return the set of changeset revs broken by truncating at striprev.

    A changeset below striprev is "broken" when the manifest or one of the
    given filelogs must be truncated at a point that removes data the
    changeset links to (as reported by revlog.getstrippoint).
    """
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return s
73
73
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the given nodes (and their descendants) from the repository.

    Revisions kept above the strip point are first saved to a temporary
    bundle and re-applied after truncation.  When ``backup`` is true, a
    full backup bundle of everything stripped is written to
    ``.hg/strip-backup`` using ``topic`` as the filename suffix.

    This function operates within a transaction of its own, but does
    not take any lock on the repo; it aborts if called from inside an
    existing transaction.
    """
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing into the stripped set must be moved afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate the files the transaction touched to their
            # pre-transaction sizes; empty files are removed from the store
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if saveheads or savebases:
            # re-apply the saved revisions from the temporary bundle
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(chgrpfile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(chgrpfile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
232
232
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # per-directory manifest revlogs also live in the fncache
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
303
303
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now