repair: use cg?unpacker.apply() instead of changegroup.addchangegroup()
Augie Fackler
r26701:b1a0c534 default
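
The change itself is mechanical: applying a changegroup moves from the module-level changegroup.addchangegroup() function to an apply() method on the unpacker object (cg1unpacker/cg2unpacker) that exchange.readbundle() hands back. A minimal before/after sketch, using the argument values from the hunk below:

    # before: module-level function, unpacker passed as an argument
    changegroup.addchangegroup(repo, gen, 'strip',
                               'bundle:' + vfs.join(chgrpfile), True)

    # after: method on the unpacker instance itself
    gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)

The positional arguments carry over unchanged; only the dispatch point differs.
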
@@ -1,313 +1,310 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno

 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
     error,
     exchange,
     util,
 )

 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = '01'
     if 'generaldelta' in repo.requirements:
         cgversion = '02'

     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = util.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                    compression=comp)

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])

     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))

     return s

 def strip(ui, repo, nodelist, backup=True, topic='backup'):

     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()

     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'

     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)

     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)

     mfst = repo.manifest

     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))

     tr = repo.transaction("strip")
     offset = len(tr.entries)

     try:
         tr.startgroup()
         cl.strip(striprev, tr)
         mfst.strip(striprev, tr)
         for fn in files:
             repo.file(fn).strip(striprev, tr)
         tr.endgroup()

         try:
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.svfs(file, 'a').truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)
             tr.close()
         finally:
             tr.release()

         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = vfs.open(chgrpfile, "rb")
             gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 tr = repo.transaction('strip')
                 tr.hookargs = {'source': 'strip',
                                'url': 'bundle:' + vfs.join(chgrpfile)}
                 try:
                     bundle2.processbundle(repo, gen, lambda: tr)
                     tr.close()
                 finally:
                     tr.release()
             else:
-                changegroup.addchangegroup(repo, gen, 'strip',
-                                           'bundle:' + vfs.join(chgrpfile),
-                                           True)
+                gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()

         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))

         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         bm.write()
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % vfs.join(backupfile))
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % vfs.join(chgrpfile))
         raise
     else:
         if saveheads or savebases:
             # Remove partial backup only if there were no exceptions
             vfs.unlink(chgrpfile)

     repo.destroyed()

 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.

     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()

     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return

     lock = repo.lock()
     try:
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass

         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()

         repolen = len(repo)
         for rev in repo:
             ui.progress(_('changeset'), rev, total=repolen)

             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)

                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         ui.progress(_('changeset'), None)

         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)

         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True

             tr = repo.transaction('fncache')
             try:
                 fnc.write(tr)
                 tr.close()
             finally:
                 tr.release()
         else:
             ui.write(_('fncache already up to date\n'))
     finally:
         lock.release()

 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark

     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)

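
For callers, repair.strip()'s interface is unchanged by this commit. A minimal, hypothetical invocation from an extension (ui, repo, and a binary node id `node` assumed to be in scope):

    from mercurial import repair

    # Strip `node` and all of its descendants. With backup=True (the
    # default) a backup bundle is written under .hg/strip-backup/ first.
    repair.strip(ui, repo, [node], backup=True, topic='backup')

Revisions above the strip point that are not descendants of the stripped set are saved to a temporary uncompressed bundle and reapplied afterwards; that reapplication is the code path this commit converts to gen.apply().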