##// END OF EJS Templates
strip: use the 'writenewbundle' function to get bundle on disk...
marmoute -
r32468:4c4d9190 default
parent child Browse files
Show More
@@ -1,358 +1,360 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 discovery,
19 20 error,
20 21 exchange,
21 22 obsolete,
22 23 util,
23 24 )
24 25
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written into the 'strip-backup' directory of the repo
    vfs and contains the changesets reachable from 'heads' but not from
    'bases'.  'node' and 'suffix' are only used to build the backup file
    name.  Returns the bundle file name relative to the repo vfs.
    """
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Pick the on-disk bundle type from the changegroup version the
    # repository supports; only HG20 bundles take a separate compression
    # argument, the HG10 types encode it in the bundle type itself.
    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {'cg.version': cgversion}
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
54 56 def _collectfiles(repo, striprev):
55 57 """find out the filelogs affected by the strip"""
56 58 files = set()
57 59
58 60 for x in xrange(striprev, len(repo)):
59 61 files.update(repo[x].files())
60 62
61 63 return sorted(files)
62 64
63 65 def _collectbrokencsets(repo, files, striprev):
64 66 """return the changesets which will be broken by the truncation"""
65 67 s = set()
66 68 def collectone(revlog):
67 69 _, brokenset = revlog.getstrippoint(striprev)
68 70 s.update([revlog.linkrev(r) for r in brokenset])
69 71
70 72 collectone(repo.manifestlog._revlog)
71 73 for fname in files:
72 74 collectone(repo.file(fname))
73 75
74 76 return s
75 77
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the changesets in 'nodelist' and their descendants from 'repo'.

    If 'backup' is true, the stripped revisions are first saved as a bundle
    in the 'strip-backup' directory (named after 'topic'); revisions that
    are not descendants of the strip point but whose revlog entries would
    be truncated are saved to a temporary bundle and re-applied afterwards.
    Bookmarks pointing to stripped revisions are moved to the closest
    surviving ancestor.  Returns the backup bundle path, or None if no
    backup was requested.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument: older callers passed the backup *mode* as a string here.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we need the list of heads and bases of the set
    # of interesting revisions (see _bundle).
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect the bookmarks that point into the stripped set; they are
    # re-targeted to newbmtarget after the strip succeeds
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        raise error.ProgrammingError('cannot strip from inside a transaction')

    try:
        with repo.transaction("strip") as tr:
            # journal entries created before this point belong to other
            # operations; only truncate files touched from here on
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # physically truncate every revlog file recorded in the
            # transaction journal back to its pre-strip offset
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            # restore the revisions that were not descendants of the strip
            # point but had to be truncated along with it
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        # phase data refers to revs that may no longer exist
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.lock():
            with repo.transaction('repair') as tr:
                bm.recordchange(tr)

        # remove undo files: they reference the pre-strip state and would
        # corrupt the repo if rolled back
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
237 239
def striptrees(repo, tr, striprev, files):
    """Strip the per-directory manifest revlogs of a treemanifest repo.

    No-op for flat-manifest repositories; running the requirement check is
    safe but unnecessary otherwise.
    """
    if 'treemanifest' not in repo.requirements:
        return
    mfrevlog = repo.manifestlog._revlog
    for unencoded, encoded, size in repo.store.datafiles():
        if not unencoded.startswith('meta/'):
            continue
        if not unencoded.endswith('00manifest.i'):
            continue
        # drop the 'meta/' prefix and the '00manifest.i' suffix to
        # recover the tree directory name
        directory = unencoded[5:-12]
        mfrevlog.dirlog(directory).strip(striprev, tr)
246 248
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset, collects the store paths of all files (and,
    for treemanifest repos, all directory manifests) they touch, and
    replaces the fncache contents with the set of paths that actually
    exist in the store.  The rewrite happens under the repo lock and
    inside a transaction; nothing is written if the cache is already
    up to date.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache: membership test forces the lazy read
        # so fnc.entries below is populated.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O: each filelog is checked once.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                # only record store files that physically exist
                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # every parent directory of a tracked file may own a
            # per-directory manifest revlog
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
318 320
def stripbmrevset(repo, mark):
    """Return the revset of changesets to strip for ``hg strip -B mark``.

    Needs to live here so extensions can use it and wrap it even when strip
    is not enabled or not present on a box.
    """
    expr = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(expr, mark, mark, mark)
330 332
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    removed = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            removed += 1
        else:
            kept.append(marker)

    # atomictemp gives an all-or-nothing replacement of the obsstore file
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return removed
General Comments 0
You need to be logged in to leave comments. Login now