repair: remove unnecessary locking for bookmarks...
Martin von Zweigbergk
r32925:4c6e4a44 default
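The removed repo.lock() call was redundant: as the comment at the top of strip() below states, the caller is already required to hold the repository lock, and Mercurial locks are re-entrant, so re-acquiring the lock around the bookmark transaction added nothing. A minimal sketch of the expected calling convention (the helper name stripone is illustrative, not part of this changeset):

from mercurial import repair

def stripone(ui, repo, node):
    # Callers such as the strip extension take the working-directory and
    # store locks before calling into repair.strip(); strip() then opens
    # its own transactions internally, including the one that moves any
    # bookmarks off the stripped changesets.
    with repo.wlock(), repo.lock():
        return repair.strip(ui, repo, [node], backup=True)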
@@ -1,372 +1,371 @@
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 discovery,
20 20 error,
21 21 exchange,
22 22 obsolete,
23 23 util,
24 24 )
25 25
26 26 def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
27 27 """create a bundle with the specified revisions as a backup"""
28 28
29 29 backupdir = "strip-backup"
30 30 vfs = repo.vfs
31 31 if not vfs.isdir(backupdir):
32 32 vfs.mkdir(backupdir)
33 33
34 34 # Include a hash of all the nodes in the filename for uniqueness
35 35 allcommits = repo.set('%ln::%ln', bases, heads)
36 36 allhashes = sorted(c.hex() for c in allcommits)
37 37 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
38 38 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
39 39
40 40 cgversion = changegroup.safeversion(repo)
41 41 comp = None
42 42 if cgversion != '01':
43 43 bundletype = "HG20"
44 44 if compress:
45 45 comp = 'BZ'
46 46 elif compress:
47 47 bundletype = "HG10BZ"
48 48 else:
49 49 bundletype = "HG10UN"
50 50
51 51 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
52 52 contentopts = {'cg.version': cgversion, 'obsolescence': obsolescence}
53 53 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
54 54 outgoing, contentopts, vfs, compression=comp)
55 55
56 56 def _collectfiles(repo, striprev):
57 57 """find out the filelogs affected by the strip"""
58 58 files = set()
59 59
60 60 for x in xrange(striprev, len(repo)):
61 61 files.update(repo[x].files())
62 62
63 63 return sorted(files)
64 64
65 65 def _collectbrokencsets(repo, files, striprev):
66 66 """return the changesets which will be broken by the truncation"""
67 67 s = set()
68 68 def collectone(revlog):
69 69 _, brokenset = revlog.getstrippoint(striprev)
70 70 s.update([revlog.linkrev(r) for r in brokenset])
71 71
72 72 collectone(repo.manifestlog._revlog)
73 73 for fname in files:
74 74 collectone(repo.file(fname))
75 75
76 76 return s
77 77
78 78 def strip(ui, repo, nodelist, backup=True, topic='backup'):
79 79 # This function requires the caller to lock the repo, but it operates
80 80 # within a transaction of its own, and thus requires there to be no current
81 81 # transaction when it is called.
82 82 if repo.currenttransaction() is not None:
83 83 raise error.ProgrammingError('cannot strip from inside a transaction')
84 84
85 85 # Simple way to maintain backwards compatibility for this
86 86 # argument.
87 87 if backup in ['none', 'strip']:
88 88 backup = False
89 89
90 90 repo = repo.unfiltered()
91 91 repo.destroying()
92 92
93 93 cl = repo.changelog
94 94 # TODO handle undo of merge sets
95 95 if isinstance(nodelist, str):
96 96 nodelist = [nodelist]
97 97 striplist = [cl.rev(node) for node in nodelist]
98 98 striprev = min(striplist)
99 99
100 100 files = _collectfiles(repo, striprev)
101 101 saverevs = _collectbrokencsets(repo, files, striprev)
102 102
103 103 # Some revisions with rev > striprev may not be descendants of striprev.
104 104 # We have to find these revisions and put them in a bundle, so that
105 105 # we can restore them after the truncations.
106 106 # To create the bundle we use repo.changegroupsubset which requires
107 107 # the list of heads and bases of the set of interesting revisions.
108 108 # (head = revision in the set that has no descendant in the set;
109 109 # base = revision in the set that has no ancestor in the set)
110 110 tostrip = set(striplist)
111 111 saveheads = set(saverevs)
112 112 for r in cl.revs(start=striprev + 1):
113 113 if any(p in tostrip for p in cl.parentrevs(r)):
114 114 tostrip.add(r)
115 115
116 116 if r not in tostrip:
117 117 saverevs.add(r)
118 118 saveheads.difference_update(cl.parentrevs(r))
119 119 saveheads.add(r)
120 120 saveheads = [cl.node(r) for r in saveheads]
121 121
122 122 # compute base nodes
123 123 if saverevs:
124 124 descendants = set(cl.descendants(saverevs))
125 125 saverevs.difference_update(descendants)
126 126 savebases = [cl.node(r) for r in saverevs]
127 127 stripbases = [cl.node(r) for r in tostrip]
128 128
129 129 stripobsidx = obsmarkers = ()
130 130 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
131 131 obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
132 132 if obsmarkers:
133 133 stripobsidx = [i for i, m in enumerate(repo.obsstore)
134 134 if m in obsmarkers]
135 135
136 136 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
137 137 # is much faster
138 138 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
139 139 if newbmtarget:
140 140 newbmtarget = repo[newbmtarget.first()].node()
141 141 else:
142 142 newbmtarget = '.'
143 143
144 144 bm = repo._bookmarks
145 145 updatebm = []
146 146 for m in bm:
147 147 rev = repo[bm[m]].rev()
148 148 if rev in tostrip:
149 149 updatebm.append(m)
150 150
151 151 # create a changegroup for all the branches we need to keep
152 152 backupfile = None
153 153 vfs = repo.vfs
154 154 node = nodelist[-1]
155 155 if backup:
156 156 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
157 157 repo.ui.status(_("saved backup bundle to %s\n") %
158 158 vfs.join(backupfile))
159 159 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
160 160 vfs.join(backupfile))
161 161 tmpbundlefile = None
162 162 if saveheads:
163 163 # do not compress temporary bundle if we remove it from disk later
164 164 #
165 165 # We do not include obsolescence, it might re-introduce prune markers
166 166 # we are trying to strip. This is harmless since the stripped markers
168 168 # are already backed up and we did not touch the markers for the
168 168 # saved changesets.
169 169 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
170 170 compress=False, obsolescence=False)
171 171
172 172 mfst = repo.manifestlog._revlog
173 173
174 174 try:
175 175 with repo.transaction("strip") as tr:
176 176 offset = len(tr.entries)
177 177
178 178 tr.startgroup()
179 179 cl.strip(striprev, tr)
180 180 mfst.strip(striprev, tr)
181 181 striptrees(repo, tr, striprev, files)
182 182
183 183 for fn in files:
184 184 repo.file(fn).strip(striprev, tr)
185 185 tr.endgroup()
186 186
187 187 for i in xrange(offset, len(tr.entries)):
188 188 file, troffset, ignore = tr.entries[i]
189 189 with repo.svfs(file, 'a', checkambig=True) as fp:
190 190 fp.truncate(troffset)
191 191 if troffset == 0:
192 192 repo.store.markremoved(file)
193 193
194 194 deleteobsmarkers(repo.obsstore, stripobsidx)
195 195 del repo.obsstore
196 196
197 197 if tmpbundlefile:
198 198 ui.note(_("adding branch\n"))
199 199 f = vfs.open(tmpbundlefile, "rb")
200 200 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
201 201 if not repo.ui.verbose:
202 202 # silence internal shuffling chatter
203 203 repo.ui.pushbuffer()
204 204 if isinstance(gen, bundle2.unbundle20):
205 205 with repo.transaction('strip') as tr:
206 206 bundle2.applybundle(repo, gen, tr, source='strip',
207 207 url='bundle:' + vfs.join(tmpbundlefile))
208 208 else:
209 209 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
210 210 True)
211 211 if not repo.ui.verbose:
212 212 repo.ui.popbuffer()
213 213 f.close()
214 214 repo._phasecache.invalidate()
215 215
216 216 for m in updatebm:
217 217 bm[m] = repo[newbmtarget].node()
218 218
- 219 with repo.lock():
- 220 with repo.transaction('repair') as tr:
- 221 bm.recordchange(tr)
+ 219 with repo.transaction('repair') as tr:
+ 220 bm.recordchange(tr)
222 221
223 222 # remove undo files
224 223 for undovfs, undofile in repo.undofiles():
225 224 try:
226 225 undovfs.unlink(undofile)
227 226 except OSError as e:
228 227 if e.errno != errno.ENOENT:
229 228 ui.warn(_('error removing %s: %s\n') %
230 229 (undovfs.join(undofile), str(e)))
231 230
232 231 except: # re-raises
233 232 if backupfile:
234 233 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
235 234 % vfs.join(backupfile))
236 235 if tmpbundlefile:
237 236 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
238 237 % vfs.join(tmpbundlefile))
239 238 ui.warn(_("(fix the problem, then recover the changesets with "
240 239 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
241 240 raise
242 241 else:
243 242 if tmpbundlefile:
244 243 # Remove temporary bundle only if there were no exceptions
245 244 vfs.unlink(tmpbundlefile)
246 245
247 246 repo.destroyed()
248 247 # return the backup file path (or None if 'backup' was False) so
249 248 # extensions can use it
250 249 return backupfile
251 250
252 251 def striptrees(repo, tr, striprev, files):
253 252 if 'treemanifest' in repo.requirements: # safe but unnecessary
254 253 # otherwise
255 254 for unencoded, encoded, size in repo.store.datafiles():
256 255 if (unencoded.startswith('meta/') and
257 256 unencoded.endswith('00manifest.i')):
258 257 dir = unencoded[5:-12]
259 258 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
260 259
261 260 def rebuildfncache(ui, repo):
262 261 """Rebuilds the fncache file from repo history.
263 262
264 263 Missing entries will be added. Extra entries will be removed.
265 264 """
266 265 repo = repo.unfiltered()
267 266
268 267 if 'fncache' not in repo.requirements:
269 268 ui.warn(_('(not rebuilding fncache because repository does not '
270 269 'support fncache)\n'))
271 270 return
272 271
273 272 with repo.lock():
274 273 fnc = repo.store.fncache
275 274 # Trigger load of fncache.
276 275 if 'irrelevant' in fnc:
277 276 pass
278 277
279 278 oldentries = set(fnc.entries)
280 279 newentries = set()
281 280 seenfiles = set()
282 281
283 282 repolen = len(repo)
284 283 for rev in repo:
285 284 ui.progress(_('rebuilding'), rev, total=repolen,
286 285 unit=_('changesets'))
287 286
288 287 ctx = repo[rev]
289 288 for f in ctx.files():
290 289 # This is to minimize I/O.
291 290 if f in seenfiles:
292 291 continue
293 292 seenfiles.add(f)
294 293
295 294 i = 'data/%s.i' % f
296 295 d = 'data/%s.d' % f
297 296
298 297 if repo.store._exists(i):
299 298 newentries.add(i)
300 299 if repo.store._exists(d):
301 300 newentries.add(d)
302 301
303 302 ui.progress(_('rebuilding'), None)
304 303
305 304 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
306 305 for dir in util.dirs(seenfiles):
307 306 i = 'meta/%s/00manifest.i' % dir
308 307 d = 'meta/%s/00manifest.d' % dir
309 308
310 309 if repo.store._exists(i):
311 310 newentries.add(i)
312 311 if repo.store._exists(d):
313 312 newentries.add(d)
314 313
315 314 addcount = len(newentries - oldentries)
316 315 removecount = len(oldentries - newentries)
317 316 for p in sorted(oldentries - newentries):
318 317 ui.write(_('removing %s\n') % p)
319 318 for p in sorted(newentries - oldentries):
320 319 ui.write(_('adding %s\n') % p)
321 320
322 321 if addcount or removecount:
323 322 ui.write(_('%d items added, %d removed from fncache\n') %
324 323 (addcount, removecount))
325 324 fnc.entries = newentries
326 325 fnc._dirty = True
327 326
328 327 with repo.transaction('fncache') as tr:
329 328 fnc.write(tr)
330 329 else:
331 330 ui.write(_('fncache already up to date\n'))
332 331
333 332 def stripbmrevset(repo, mark):
334 333 """
335 334 The revset to strip when strip is called with -B mark
336 335
337 336 Needs to live here so extensions can use it and wrap it even when strip is
338 337 not enabled or not present on a box.
339 338 """
340 339 return repo.revs("ancestors(bookmark(%s)) - "
341 340 "ancestors(head() and not bookmark(%s)) - "
342 341 "ancestors(bookmark() and not bookmark(%s))",
343 342 mark, mark, mark)
344 343
345 344 def deleteobsmarkers(obsstore, indices):
346 345 """Delete some obsmarkers from obsstore and return how many were deleted
347 346
348 347 'indices' is a list of ints which are the indices
349 348 of the markers to be deleted.
350 349
351 350 Every invocation of this function completely rewrites the obsstore file,
352 351 skipping the markers we want removed. A new temporary file is
353 352 created, the remaining markers are written to it, and on .close() the
354 353 file is atomically renamed to obsstore, guaranteeing consistency.
355 354 if not indices:
356 355 # we don't want to rewrite the obsstore with the same content
357 356 return
358 357
359 358 left = []
360 359 current = obsstore._all
361 360 n = 0
362 361 for i, m in enumerate(current):
363 362 if i in indices:
364 363 n += 1
365 364 continue
366 365 left.append(m)
367 366
368 367 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
369 368 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
370 369 newobsstorefile.write(bytes)
371 370 newobsstorefile.close()
372 371 return n
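For reference, a hedged usage sketch of deleteobsmarkers() outside of strip(): collect the markers exclusive to a set of nodes and map them to their positions in the obsstore, mirroring how strip() builds stripobsidx above (dropexclusivemarkers is a hypothetical helper, not part of this changeset):

from mercurial import obsolete, repair

def dropexclusivemarkers(repo, nodes):
    with repo.lock():
        # exclusivemarkers() is the same helper strip() uses to find the
        # markers that only concern the changesets being removed.
        markers = obsolete.exclusivemarkers(repo, nodes)
        indices = [i for i, m in enumerate(repo.obsstore)
                   if m in markers]
        return repair.deleteobsmarkers(repo.obsstore, indices)

deleteobsmarkers() returns the number of markers removed; when indices is empty it leaves the obsstore untouched.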