repair: clarify in comment that caller must take lock, but not transaction...
Martin von Zweigbergk
r32922:eb84b4ad default
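A minimal usage sketch of the contract the updated comment describes: the caller acquires the repository lock, and no transaction may be active when strip() runs, because strip() opens a transaction of its own (the function raises ProgrammingError otherwise, as seen in the diff below). The wrapper function name here is hypothetical; the repair.strip() signature matches the one shown in the file.

    from mercurial import repair

    def stripnodes(ui, repo, nodes):
        # Caller takes the lock; strip() must not be called from inside an
        # already-running transaction because it manages its own.
        with repo.lock():
            return repair.strip(ui, repo, nodes, backup=True, topic='backup')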
@@ -1,373 +1,374 @@
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 discovery,
20 20 error,
21 21 exchange,
22 22 obsolete,
23 23 util,
24 24 )
25 25
26 26 def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
27 27 """create a bundle with the specified revisions as a backup"""
28 28
29 29 backupdir = "strip-backup"
30 30 vfs = repo.vfs
31 31 if not vfs.isdir(backupdir):
32 32 vfs.mkdir(backupdir)
33 33
34 34 # Include a hash of all the nodes in the filename for uniqueness
35 35 allcommits = repo.set('%ln::%ln', bases, heads)
36 36 allhashes = sorted(c.hex() for c in allcommits)
37 37 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
38 38 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
39 39
40 40 cgversion = changegroup.safeversion(repo)
41 41 comp = None
42 42 if cgversion != '01':
43 43 bundletype = "HG20"
44 44 if compress:
45 45 comp = 'BZ'
46 46 elif compress:
47 47 bundletype = "HG10BZ"
48 48 else:
49 49 bundletype = "HG10UN"
50 50
51 51 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
52 52 contentopts = {'cg.version': cgversion, 'obsolescence': obsolescence}
53 53 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
54 54 outgoing, contentopts, vfs, compression=comp)
55 55
56 56 def _collectfiles(repo, striprev):
57 57 """find out the filelogs affected by the strip"""
58 58 files = set()
59 59
60 60 for x in xrange(striprev, len(repo)):
61 61 files.update(repo[x].files())
62 62
63 63 return sorted(files)
64 64
65 65 def _collectbrokencsets(repo, files, striprev):
66 66 """return the changesets which will be broken by the truncation"""
67 67 s = set()
68 68 def collectone(revlog):
69 69 _, brokenset = revlog.getstrippoint(striprev)
70 70 s.update([revlog.linkrev(r) for r in brokenset])
71 71
72 72 collectone(repo.manifestlog._revlog)
73 73 for fname in files:
74 74 collectone(repo.file(fname))
75 75
76 76 return s
77 77
78 78 def strip(ui, repo, nodelist, backup=True, topic='backup'):
79 # This function operates within a transaction of its own, but does
80 # not take any lock on the repo.
79 # This function requires the caller to lock the repo, but it operates
80 # within a transaction of its own, and thus requires there to be no current
81 # transaction when it is called.
81 82 # Simple way to maintain backwards compatibility for this
82 83 # argument.
83 84 if backup in ['none', 'strip']:
84 85 backup = False
85 86
86 87 repo = repo.unfiltered()
87 88 repo.destroying()
88 89
89 90 cl = repo.changelog
90 91 # TODO handle undo of merge sets
91 92 if isinstance(nodelist, str):
92 93 nodelist = [nodelist]
93 94 striplist = [cl.rev(node) for node in nodelist]
94 95 striprev = min(striplist)
95 96
96 97 files = _collectfiles(repo, striprev)
97 98 saverevs = _collectbrokencsets(repo, files, striprev)
98 99
99 100 # Some revisions with rev > striprev may not be descendants of striprev.
100 101 # We have to find these revisions and put them in a bundle, so that
101 102 # we can restore them after the truncations.
102 103 # To create the bundle we use repo.changegroupsubset which requires
103 104 # the list of heads and bases of the set of interesting revisions.
104 105 # (head = revision in the set that has no descendant in the set;
105 106 # base = revision in the set that has no ancestor in the set)
106 107 tostrip = set(striplist)
107 108 saveheads = set(saverevs)
108 109 for r in cl.revs(start=striprev + 1):
109 110 if any(p in tostrip for p in cl.parentrevs(r)):
110 111 tostrip.add(r)
111 112
112 113 if r not in tostrip:
113 114 saverevs.add(r)
114 115 saveheads.difference_update(cl.parentrevs(r))
115 116 saveheads.add(r)
116 117 saveheads = [cl.node(r) for r in saveheads]
117 118
118 119 # compute base nodes
119 120 if saverevs:
120 121 descendants = set(cl.descendants(saverevs))
121 122 saverevs.difference_update(descendants)
122 123 savebases = [cl.node(r) for r in saverevs]
123 124 stripbases = [cl.node(r) for r in tostrip]
124 125
125 126 stripobsidx = obsmarkers = ()
126 127 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
127 128 obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
128 129 if obsmarkers:
129 130 stripobsidx = [i for i, m in enumerate(repo.obsstore)
130 131 if m in obsmarkers]
131 132
132 133 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
133 134 # is much faster
134 135 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
135 136 if newbmtarget:
136 137 newbmtarget = repo[newbmtarget.first()].node()
137 138 else:
138 139 newbmtarget = '.'
139 140
140 141 bm = repo._bookmarks
141 142 updatebm = []
142 143 for m in bm:
143 144 rev = repo[bm[m]].rev()
144 145 if rev in tostrip:
145 146 updatebm.append(m)
146 147
147 148 # create a changegroup for all the branches we need to keep
148 149 backupfile = None
149 150 vfs = repo.vfs
150 151 node = nodelist[-1]
151 152 if backup:
152 153 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
153 154 repo.ui.status(_("saved backup bundle to %s\n") %
154 155 vfs.join(backupfile))
155 156 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
156 157 vfs.join(backupfile))
157 158 tmpbundlefile = None
158 159 if saveheads:
159 160 # do not compress temporary bundle if we remove it from disk later
160 161 #
161 162 # We do not include obsolescence, it might re-introduce prune markers
162 163 # we are trying to strip. This is harmless since the stripped markers
163 164 # are already backed up and we did not touch the markers for the
164 165 # saved changesets.
165 166 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
166 167 compress=False, obsolescence=False)
167 168
168 169 mfst = repo.manifestlog._revlog
169 170
170 171 curtr = repo.currenttransaction()
171 172 if curtr is not None:
172 173 del curtr # avoid carrying reference to transaction for nothing
173 174 raise error.ProgrammingError('cannot strip from inside a transaction')
174 175
175 176 try:
176 177 with repo.transaction("strip") as tr:
177 178 offset = len(tr.entries)
178 179
179 180 tr.startgroup()
180 181 cl.strip(striprev, tr)
181 182 mfst.strip(striprev, tr)
182 183 striptrees(repo, tr, striprev, files)
183 184
184 185 for fn in files:
185 186 repo.file(fn).strip(striprev, tr)
186 187 tr.endgroup()
187 188
188 189 for i in xrange(offset, len(tr.entries)):
189 190 file, troffset, ignore = tr.entries[i]
190 191 with repo.svfs(file, 'a', checkambig=True) as fp:
191 192 fp.truncate(troffset)
192 193 if troffset == 0:
193 194 repo.store.markremoved(file)
194 195
195 196 deleteobsmarkers(repo.obsstore, stripobsidx)
196 197 del repo.obsstore
197 198
198 199 if tmpbundlefile:
199 200 ui.note(_("adding branch\n"))
200 201 f = vfs.open(tmpbundlefile, "rb")
201 202 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
202 203 if not repo.ui.verbose:
203 204 # silence internal shuffling chatter
204 205 repo.ui.pushbuffer()
205 206 if isinstance(gen, bundle2.unbundle20):
206 207 with repo.transaction('strip') as tr:
207 208 bundle2.applybundle(repo, gen, tr, source='strip',
208 209 url='bundle:' + vfs.join(tmpbundlefile))
209 210 else:
210 211 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
211 212 True)
212 213 if not repo.ui.verbose:
213 214 repo.ui.popbuffer()
214 215 f.close()
215 216 repo._phasecache.invalidate()
216 217
217 218 for m in updatebm:
218 219 bm[m] = repo[newbmtarget].node()
219 220
220 221 with repo.lock():
221 222 with repo.transaction('repair') as tr:
222 223 bm.recordchange(tr)
223 224
224 225 # remove undo files
225 226 for undovfs, undofile in repo.undofiles():
226 227 try:
227 228 undovfs.unlink(undofile)
228 229 except OSError as e:
229 230 if e.errno != errno.ENOENT:
230 231 ui.warn(_('error removing %s: %s\n') %
231 232 (undovfs.join(undofile), str(e)))
232 233
233 234 except: # re-raises
234 235 if backupfile:
235 236 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
236 237 % vfs.join(backupfile))
237 238 if tmpbundlefile:
238 239 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
239 240 % vfs.join(tmpbundlefile))
240 241 ui.warn(_("(fix the problem, then recover the changesets with "
241 242 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
242 243 raise
243 244 else:
244 245 if tmpbundlefile:
245 246 # Remove temporary bundle only if there were no exceptions
246 247 vfs.unlink(tmpbundlefile)
247 248
248 249 repo.destroyed()
249 250 # return the backup file path (or None if 'backup' was False) so
250 251 # extensions can use it
251 252 return backupfile
252 253
253 254 def striptrees(repo, tr, striprev, files):
254 255 if 'treemanifest' in repo.requirements: # safe but unnecessary
255 256 # otherwise
256 257 for unencoded, encoded, size in repo.store.datafiles():
257 258 if (unencoded.startswith('meta/') and
258 259 unencoded.endswith('00manifest.i')):
259 260 dir = unencoded[5:-12]
260 261 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
261 262
262 263 def rebuildfncache(ui, repo):
263 264 """Rebuilds the fncache file from repo history.
264 265
265 266 Missing entries will be added. Extra entries will be removed.
266 267 """
267 268 repo = repo.unfiltered()
268 269
269 270 if 'fncache' not in repo.requirements:
270 271 ui.warn(_('(not rebuilding fncache because repository does not '
271 272 'support fncache)\n'))
272 273 return
273 274
274 275 with repo.lock():
275 276 fnc = repo.store.fncache
276 277 # Trigger load of fncache.
277 278 if 'irrelevant' in fnc:
278 279 pass
279 280
280 281 oldentries = set(fnc.entries)
281 282 newentries = set()
282 283 seenfiles = set()
283 284
284 285 repolen = len(repo)
285 286 for rev in repo:
286 287 ui.progress(_('rebuilding'), rev, total=repolen,
287 288 unit=_('changesets'))
288 289
289 290 ctx = repo[rev]
290 291 for f in ctx.files():
291 292 # This is to minimize I/O.
292 293 if f in seenfiles:
293 294 continue
294 295 seenfiles.add(f)
295 296
296 297 i = 'data/%s.i' % f
297 298 d = 'data/%s.d' % f
298 299
299 300 if repo.store._exists(i):
300 301 newentries.add(i)
301 302 if repo.store._exists(d):
302 303 newentries.add(d)
303 304
304 305 ui.progress(_('rebuilding'), None)
305 306
306 307 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
307 308 for dir in util.dirs(seenfiles):
308 309 i = 'meta/%s/00manifest.i' % dir
309 310 d = 'meta/%s/00manifest.d' % dir
310 311
311 312 if repo.store._exists(i):
312 313 newentries.add(i)
313 314 if repo.store._exists(d):
314 315 newentries.add(d)
315 316
316 317 addcount = len(newentries - oldentries)
317 318 removecount = len(oldentries - newentries)
318 319 for p in sorted(oldentries - newentries):
319 320 ui.write(_('removing %s\n') % p)
320 321 for p in sorted(newentries - oldentries):
321 322 ui.write(_('adding %s\n') % p)
322 323
323 324 if addcount or removecount:
324 325 ui.write(_('%d items added, %d removed from fncache\n') %
325 326 (addcount, removecount))
326 327 fnc.entries = newentries
327 328 fnc._dirty = True
328 329
329 330 with repo.transaction('fncache') as tr:
330 331 fnc.write(tr)
331 332 else:
332 333 ui.write(_('fncache already up to date\n'))
333 334
334 335 def stripbmrevset(repo, mark):
335 336 """
336 337 The revset to strip when strip is called with -B mark
337 338
338 339 Needs to live here so extensions can use it and wrap it even when strip is
339 340 not enabled or not present on a box.
340 341 """
341 342 return repo.revs("ancestors(bookmark(%s)) - "
342 343 "ancestors(head() and not bookmark(%s)) - "
343 344 "ancestors(bookmark() and not bookmark(%s))",
344 345 mark, mark, mark)
345 346
346 347 def deleteobsmarkers(obsstore, indices):
347 348 """Delete some obsmarkers from obsstore and return how many were deleted
348 349
349 350 'indices' is a list of ints which are the indices
350 351 of the markers to be deleted.
351 352
352 353 Every invocation of this function completely rewrites the obsstore file,
353 354 skipping the markers we want to be removed. The new temporary file is
354 355 created, remaining markers are written there and on .close() this file
355 356 gets atomically renamed to obsstore, thus guaranteeing consistency."""
356 357 if not indices:
357 358 # we don't want to rewrite the obsstore with the same content
358 359 return
359 360
360 361 left = []
361 362 current = obsstore._all
362 363 n = 0
363 364 for i, m in enumerate(current):
364 365 if i in indices:
365 366 n += 1
366 367 continue
367 368 left.append(m)
368 369
369 370 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
370 371 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
371 372 newobsstorefile.write(bytes)
372 373 newobsstorefile.close()
373 374 return n
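The comment near the end of strip() notes that the backup file path (or None when no backup was made) is returned so extensions can use it. A small, hypothetical sketch of such a caller, again under the lock/no-transaction contract; the function name and the status message are assumptions, not part of Mercurial's API:

    from mercurial import repair

    def stripwithreport(ui, repo, nodes):
        with repo.lock():
            backupfile = repair.strip(ui, repo, nodes, backup=True)
        if backupfile is not None:
            # vfs.join() turns the store-relative name into an absolute path,
            # mirroring how strip() itself reports the backup location.
            ui.status('backup bundle written to %s\n' % repo.vfs.join(backupfile))
        return backupfile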