##// END OF EJS Templates
repair: make strip() return backup file path...
Martin von Zweigbergk -
r30274:c1345969 default
parent child Browse files
Show More
@@ -1,359 +1,362 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 error,
20 20 exchange,
21 21 obsolete,
22 22 util,
23 23 )
24 24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Write a backup bundle of the specified revisions; return its path."""
    version = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=version)

    # make sure the backup directory exists in the repo's .hg
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    hexnodes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # choose an on-disk bundle format matching the changegroup version
    if version != '01':
        bundletype = "HG20"
        comp = 'BZ' if compress else None
    else:
        bundletype = "HG10BZ" if compress else "HG10UN"
        comp = None
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
53 53
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    for rev in xrange(striprev, len(repo)):
        # every file changed by a to-be-stripped changeset is affected
        touched.update(repo[rev].files())
    return sorted(touched)
62 62
63 63 def _collectbrokencsets(repo, files, striprev):
64 64 """return the changesets which will be broken by the truncation"""
65 65 s = set()
66 66 def collectone(revlog):
67 67 _, brokenset = revlog.getstrippoint(striprev)
68 68 s.update([revlog.linkrev(r) for r in brokenset])
69 69
70 70 collectone(repo.manifest)
71 71 for fname in files:
72 72 collectone(repo.file(fname))
73 73
74 74 return s
75 75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the revisions in 'nodelist' (and their descendants) from repo.

    When 'backup' is true, the stripped changesets are first saved to a
    bundle under the 'strip-backup' directory and its path is returned;
    otherwise None is returned.  'topic' is the suffix of the backup
    file name.  Non-descendant revisions above the strip point are
    bundled separately and re-applied after truncation.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # everything at or above the lowest requested revision gets truncated
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads of the revisions we must save (everything above
    # striprev that is NOT being stripped, plus broken changesets)
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect bookmarks pointing into the stripped set; they are moved to
    # newbmtarget after the strip succeeds
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifest

    # stripping while another transaction is open would corrupt it
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            # strip changelog, manifest(s) and filelogs as one group
            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifest.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate each revlog touched above to its recorded offset;
            # files truncated to zero length are dropped from the store
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        # re-apply the saved non-descendant revisions from the temp bundle
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # move bookmarks that pointed at stripped revisions, under lock
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        # tell the user where the recovery data lives before re-raising
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
247 250
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # A membership test forces the fncache file to be loaded.
        if 'irrelevant' in fnc:
            pass

        current = set(fnc.entries)
        wanted = set()
        seen = set()

        numrevs = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=numrevs,
                        unit=_('changesets'))

            for f in repo[rev].files():
                # Probe the store only once per filename to minimize I/O.
                if f in seen:
                    continue
                seen.add(f)

                for path in ('data/%s.i' % f, 'data/%s.d' % f):
                    if repo.store._exists(path):
                        wanted.add(path)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seen):
                for path in ('meta/%s/00manifest.i' % dir,
                             'meta/%s/00manifest.d' % dir):
                    if repo.store._exists(path):
                        wanted.add(path)

        toadd = sorted(wanted - current)
        toremove = sorted(current - wanted)
        for p in toremove:
            ui.write(_('removing %s\n') % p)
        for p in toadd:
            ui.write(_('adding %s\n') % p)

        if toadd or toremove:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (len(toadd), len(toremove)))
            # replace the cache contents and persist inside a transaction
            fnc.entries = wanted
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
319 322
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    spec = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(spec, mark, mark, mark)
331 334
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # keep every marker whose position is not slated for deletion
    all = obsstore._all
    kept = [m for i, m in enumerate(all) if i not in indices]
    ndeleted = len(all) - len(kept)

    # atomictemp makes the replacement of the obsstore file atomic on close
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return ndeleted
General Comments 0
You need to be logged in to leave comments. Login now