##// END OF EJS Templates
repair: use absolute_import
Gregory Szorc -
r25970:d1419cfb default
parent child Browse files
Show More
@@ -1,298 +1,306 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from mercurial import changegroup, exchange, util, bundle2
9 from __future__ import absolute_import
10 from mercurial.node import short
10
11 from mercurial.i18n import _
12 import errno
11 import errno
13
12
13 from .i18n import _
14 from .node import short
15 from . import (
16 bundle2,
17 changegroup,
18 exchange,
19 util,
20 )
21
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    # bundle2 is only used when an experimental strip version is configured.
    usebundle2 = (repo.ui.configbool('experimental', 'bundle2-exp', True) and
                  repo.ui.config('experimental', 'strip-bundle2-version'))
    if usebundle2:
        cgversion = repo.ui.config('experimental', 'strip-bundle2-version')
        if cgversion not in changegroup.packermap:
            # Unknown changegroup version: warn and fall back to legacy.
            repo.ui.warn(_('unknown strip-bundle2-version value %r; '
                           'should be one of %r\n') %
                         (cgversion, sorted(changegroup.packermap.keys()),))
            cgversion = '01'
            usebundle2 = False
    else:
        cgversion = '01'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Hash every node in the backed-up set so the file name is unique.
    commits = repo.set('%ln::%ln', bases, heads)
    hexhashes = sorted(c.hex() for c in commits)
    digest = util.sha1(''.join(hexhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    if usebundle2:
        bundletype = "HG20"
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs)
49
57
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union of the files touched by every revision at or above striprev.
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
58
66
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    def collectone(revlog):
        # getstrippoint returns (strippoint, set-of-broken-revs); we only
        # need the linkrevs of the broken entries.
        strippoint, brokenset = revlog.getstrippoint(striprev)
        broken.update(revlog.linkrev(r) for r in brokenset)

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return broken
71
79
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip nodelist (and descendants) from repo, optionally backing them up.

    NOTE(review): transaction ordering and the bare excepts below are
    deliberate; statement order is preserved exactly from the original.
    """
    # Backwards compatibility: old callers passed string modes here.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for strippedrev in striplist:
        for desc in cl.descendants([strippedrev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads of the kept set
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes of the kept set
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set must be moved afterwards.
    bm = repo._bookmarks
    updatebm = []
    for mark in bm:
        markrev = repo[bm[mark]].rev()
        if markrev in tostrip:
            updatebm.append(mark)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise util.Abort(msg, hint=_('contact your extension maintainer'))

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # Truncate every revlog the transaction touched back to its
            # pre-strip offset; a zero offset means the file is gone.
            for entfile, entoffset, _unused in tr.entries[offset:]:
                repo.svfs(entfile, 'a').truncate(entoffset)
                if entoffset == 0:
                    repo.store.markremoved(entfile)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            # Re-apply the bundle of revisions we want to keep.
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        for mark in updatebm:
            bm[mark] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
230
238
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    lock = repo.lock()
    try:
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        existing = set(fnc.entries)
        rebuilt = set()
        seen = set()

        numrevs = len(repo)
        for rev in repo:
            ui.progress(_('changeset'), rev, total=numrevs)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seen:
                    continue
                seen.add(f)

                indexfile = 'data/%s.i' % f
                datafile = 'data/%s.d' % f

                if repo.store._exists(indexfile):
                    rebuilt.add(indexfile)
                if repo.store._exists(datafile):
                    rebuilt.add(datafile)

        ui.progress(_('changeset'), None)

        addcount = len(rebuilt - existing)
        removecount = len(existing - rebuilt)
        for p in sorted(existing - rebuilt):
            ui.write(_('removing %s\n') % p)
        for p in sorted(rebuilt - existing):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = rebuilt
            fnc._dirty = True

            tr = repo.transaction('fncache')
            try:
                fnc.write(tr)
                tr.close()
            finally:
                tr.release()
        else:
            ui.write(_('fncache already up to date\n'))
    finally:
        lock.release()
298
306
General Comments 0
You need to be logged in to leave comments. Log in now