repair: use cg?unpacker.apply() instead of changegroup.addchangegroup()
Augie Fackler
r26701:b1a0c534 default
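
The substance of the change is at old lines 200-202 / new line 200 of the hunk below: the module-level changegroup.addchangegroup() call is replaced by calling apply() on the unpacker object that exchange.readbundle() already returns. A minimal before/after sketch of the call pattern, using the same variables as the surrounding code (the unpacker classes behind gen, cg1unpacker/cg2unpacker, are named here as background and do not appear in this diff):

    # before (old lines 200-202): free function taking the repo and the stream
    changegroup.addchangegroup(repo, gen, 'strip',
                               'bundle:' + vfs.join(chgrpfile), True)

    # after (new line 200): the unpacker returned by exchange.readbundle()
    # applies itself to the repo
    gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)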
@@ -1,313 +1,310 @@
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12
13 13 from .i18n import _
14 14 from .node import short
15 15 from . import (
16 16 bundle2,
17 17 changegroup,
18 18 error,
19 19 exchange,
20 20 util,
21 21 )
22 22
23 23 def _bundle(repo, bases, heads, node, suffix, compress=True):
24 24 """create a bundle with the specified revisions as a backup"""
25 25 cgversion = '01'
26 26 if 'generaldelta' in repo.requirements:
27 27 cgversion = '02'
28 28
29 29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
30 30 version=cgversion)
31 31 backupdir = "strip-backup"
32 32 vfs = repo.vfs
33 33 if not vfs.isdir(backupdir):
34 34 vfs.mkdir(backupdir)
35 35
36 36 # Include a hash of all the nodes in the filename for uniqueness
37 37 allcommits = repo.set('%ln::%ln', bases, heads)
38 38 allhashes = sorted(c.hex() for c in allcommits)
39 39 totalhash = util.sha1(''.join(allhashes)).hexdigest()
40 40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
41 41
42 42 comp = None
43 43 if cgversion != '01':
44 44 bundletype = "HG20"
45 45 if compress:
46 46 comp = 'BZ'
47 47 elif compress:
48 48 bundletype = "HG10BZ"
49 49 else:
50 50 bundletype = "HG10UN"
51 51 return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
52 52 compression=comp)
53 53
54 54 def _collectfiles(repo, striprev):
55 55 """find out the filelogs affected by the strip"""
56 56 files = set()
57 57
58 58 for x in xrange(striprev, len(repo)):
59 59 files.update(repo[x].files())
60 60
61 61 return sorted(files)
62 62
63 63 def _collectbrokencsets(repo, files, striprev):
64 64 """return the changesets which will be broken by the truncation"""
65 65 s = set()
66 66 def collectone(revlog):
67 67 _, brokenset = revlog.getstrippoint(striprev)
68 68 s.update([revlog.linkrev(r) for r in brokenset])
69 69
70 70 collectone(repo.manifest)
71 71 for fname in files:
72 72 collectone(repo.file(fname))
73 73
74 74 return s
75 75
76 76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
77 77
78 78 # Simple way to maintain backwards compatibility for this
79 79 # argument.
80 80 if backup in ['none', 'strip']:
81 81 backup = False
82 82
83 83 repo = repo.unfiltered()
84 84 repo.destroying()
85 85
86 86 cl = repo.changelog
87 87 # TODO handle undo of merge sets
88 88 if isinstance(nodelist, str):
89 89 nodelist = [nodelist]
90 90 striplist = [cl.rev(node) for node in nodelist]
91 91 striprev = min(striplist)
92 92
93 93 # Some revisions with rev > striprev may not be descendants of striprev.
94 94 # We have to find these revisions and put them in a bundle, so that
95 95 # we can restore them after the truncations.
96 96 # To create the bundle we use repo.changegroupsubset which requires
97 97 # the list of heads and bases of the set of interesting revisions.
98 98 # (head = revision in the set that has no descendant in the set;
99 99 # base = revision in the set that has no ancestor in the set)
100 100 tostrip = set(striplist)
101 101 for rev in striplist:
102 102 for desc in cl.descendants([rev]):
103 103 tostrip.add(desc)
104 104
105 105 files = _collectfiles(repo, striprev)
106 106 saverevs = _collectbrokencsets(repo, files, striprev)
107 107
108 108 # compute heads
109 109 saveheads = set(saverevs)
110 110 for r in xrange(striprev + 1, len(cl)):
111 111 if r not in tostrip:
112 112 saverevs.add(r)
113 113 saveheads.difference_update(cl.parentrevs(r))
114 114 saveheads.add(r)
115 115 saveheads = [cl.node(r) for r in saveheads]
116 116
117 117 # compute base nodes
118 118 if saverevs:
119 119 descendants = set(cl.descendants(saverevs))
120 120 saverevs.difference_update(descendants)
121 121 savebases = [cl.node(r) for r in saverevs]
122 122 stripbases = [cl.node(r) for r in tostrip]
123 123
124 124 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
125 125 # is much faster
126 126 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
127 127 if newbmtarget:
128 128 newbmtarget = repo[newbmtarget.first()].node()
129 129 else:
130 130 newbmtarget = '.'
131 131
132 132 bm = repo._bookmarks
133 133 updatebm = []
134 134 for m in bm:
135 135 rev = repo[bm[m]].rev()
136 136 if rev in tostrip:
137 137 updatebm.append(m)
138 138
139 139 # create a changegroup for all the branches we need to keep
140 140 backupfile = None
141 141 vfs = repo.vfs
142 142 node = nodelist[-1]
143 143 if backup:
144 144 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
145 145 repo.ui.status(_("saved backup bundle to %s\n") %
146 146 vfs.join(backupfile))
147 147 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
148 148 vfs.join(backupfile))
149 149 if saveheads or savebases:
150 150 # do not compress partial bundle if we remove it from disk later
151 151 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
152 152 compress=False)
153 153
154 154 mfst = repo.manifest
155 155
156 156 curtr = repo.currenttransaction()
157 157 if curtr is not None:
158 158 del curtr # avoid carrying reference to transaction for nothing
159 159 msg = _('programming error: cannot strip from inside a transaction')
160 160 raise error.Abort(msg, hint=_('contact your extension maintainer'))
161 161
162 162 tr = repo.transaction("strip")
163 163 offset = len(tr.entries)
164 164
165 165 try:
166 166 tr.startgroup()
167 167 cl.strip(striprev, tr)
168 168 mfst.strip(striprev, tr)
169 169 for fn in files:
170 170 repo.file(fn).strip(striprev, tr)
171 171 tr.endgroup()
172 172
173 173 try:
174 174 for i in xrange(offset, len(tr.entries)):
175 175 file, troffset, ignore = tr.entries[i]
176 176 repo.svfs(file, 'a').truncate(troffset)
177 177 if troffset == 0:
178 178 repo.store.markremoved(file)
179 179 tr.close()
180 180 finally:
181 181 tr.release()
182 182
183 183 if saveheads or savebases:
184 184 ui.note(_("adding branch\n"))
185 185 f = vfs.open(chgrpfile, "rb")
186 186 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
187 187 if not repo.ui.verbose:
188 188 # silence internal shuffling chatter
189 189 repo.ui.pushbuffer()
190 190 if isinstance(gen, bundle2.unbundle20):
191 191 tr = repo.transaction('strip')
192 192 tr.hookargs = {'source': 'strip',
193 193 'url': 'bundle:' + vfs.join(chgrpfile)}
194 194 try:
195 195 bundle2.processbundle(repo, gen, lambda: tr)
196 196 tr.close()
197 197 finally:
198 198 tr.release()
199 199 else:
200 changegroup.addchangegroup(repo, gen, 'strip',
201 'bundle:' + vfs.join(chgrpfile),
202 True)
200 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
203 201 if not repo.ui.verbose:
204 202 repo.ui.popbuffer()
205 203 f.close()
206 204
207 205 # remove undo files
208 206 for undovfs, undofile in repo.undofiles():
209 207 try:
210 208 undovfs.unlink(undofile)
211 209 except OSError as e:
212 210 if e.errno != errno.ENOENT:
213 211 ui.warn(_('error removing %s: %s\n') %
214 212 (undovfs.join(undofile), str(e)))
215 213
216 214 for m in updatebm:
217 215 bm[m] = repo[newbmtarget].node()
218 216 bm.write()
219 217 except: # re-raises
220 218 if backupfile:
221 219 ui.warn(_("strip failed, full bundle stored in '%s'\n")
222 220 % vfs.join(backupfile))
223 221 elif saveheads:
224 222 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
225 223 % vfs.join(chgrpfile))
226 224 raise
227 225 else:
228 226 if saveheads or savebases:
229 227 # Remove partial backup only if there were no exceptions
230 228 vfs.unlink(chgrpfile)
231 229
232 230 repo.destroyed()
233 231
234 232 def rebuildfncache(ui, repo):
235 233 """Rebuilds the fncache file from repo history.
236 234
237 235 Missing entries will be added. Extra entries will be removed.
238 236 """
239 237 repo = repo.unfiltered()
240 238
241 239 if 'fncache' not in repo.requirements:
242 240 ui.warn(_('(not rebuilding fncache because repository does not '
243 241 'support fncache)\n'))
244 242 return
245 243
246 244 lock = repo.lock()
247 245 try:
248 246 fnc = repo.store.fncache
249 247 # Trigger load of fncache.
250 248 if 'irrelevant' in fnc:
251 249 pass
252 250
253 251 oldentries = set(fnc.entries)
254 252 newentries = set()
255 253 seenfiles = set()
256 254
257 255 repolen = len(repo)
258 256 for rev in repo:
259 257 ui.progress(_('changeset'), rev, total=repolen)
260 258
261 259 ctx = repo[rev]
262 260 for f in ctx.files():
263 261 # This is to minimize I/O.
264 262 if f in seenfiles:
265 263 continue
266 264 seenfiles.add(f)
267 265
268 266 i = 'data/%s.i' % f
269 267 d = 'data/%s.d' % f
270 268
271 269 if repo.store._exists(i):
272 270 newentries.add(i)
273 271 if repo.store._exists(d):
274 272 newentries.add(d)
275 273
276 274 ui.progress(_('changeset'), None)
277 275
278 276 addcount = len(newentries - oldentries)
279 277 removecount = len(oldentries - newentries)
280 278 for p in sorted(oldentries - newentries):
281 279 ui.write(_('removing %s\n') % p)
282 280 for p in sorted(newentries - oldentries):
283 281 ui.write(_('adding %s\n') % p)
284 282
285 283 if addcount or removecount:
286 284 ui.write(_('%d items added, %d removed from fncache\n') %
287 285 (addcount, removecount))
288 286 fnc.entries = newentries
289 287 fnc._dirty = True
290 288
291 289 tr = repo.transaction('fncache')
292 290 try:
293 291 fnc.write(tr)
294 292 tr.close()
295 293 finally:
296 294 tr.release()
297 295 else:
298 296 ui.write(_('fncache already up to date\n'))
299 297 finally:
300 298 lock.release()
301 299
302 300 def stripbmrevset(repo, mark):
303 301 """
304 302 The revset to strip when strip is called with -B mark
305 303
306 304 Needs to live here so extensions can use it and wrap it even when strip is
307 305 not enabled or not present on a box.
308 306 """
309 307 return repo.revs("ancestors(bookmark(%s)) - "
310 308 "ancestors(head() and not bookmark(%s)) - "
311 309 "ancestors(bookmark() and not bookmark(%s))",
312 310 mark, mark, mark)
313
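
For context beyond this diff, a hedged sketch of how an extension might drive the two public helpers defined above; the wrapper function below is illustrative only, while repair.strip() and repair.stripbmrevset() and their signatures come from the file itself (the real strip extension additionally takes the repository locks around the call):

    from mercurial import repair

    def stripbookmark(ui, repo, mark):
        # Revisions reachable only via the bookmark, per stripbmrevset() above.
        revs = repair.stripbmrevset(repo, mark)
        nodes = [repo.changelog.node(r) for r in revs]
        if nodes:
            # strip() writes a backup bundle to .hg/strip-backup/ first,
            # then truncates the changelog, manifest and filelogs.
            repair.strip(ui, repo, nodes, backup=True, topic='backup')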