repair: build dirlogs using manifest, rather than repo shortcut method...
Augie Fackler
r29708:d1c3721d default
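
The change replaces the repository-level `dirlog()` shortcut with a direct lookup through the manifest, presumably so the convenience method on `localrepository` can be retired:

    # before: goes through a convenience method on the repo object
    repo.dirlog(dir).strip(striprev, tr)
    # after: ask the manifest for its per-directory revlog directly
    repo.manifest.dirlog(dir).strip(striprev, tr)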
@@ -1,354 +1,354 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib

from .i18n import _
from .node import short
from . import (
    bundle2,
    changegroup,
    error,
    exchange,
    obsolete,
    util,
)

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)

def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup=True, topic='backup'):
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
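    # For example (a sketch): if the interesting revisions are {3, 4, 6},
    # where 4 and 6 are both children of 3, then the heads are {4, 6} (no
    # descendants inside the set) and the sole base is {3} (no ancestor
    # inside the set).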
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
    # but is much faster
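    # E.g. (a sketch): for a linear history 4-5-6-7 with tostrip = {5, 6, 7},
    # parents(tostrip) - tostrip is {4}, so rev 4 becomes the new bookmark
    # target without computing the full ancestor set of tostrip.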
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
-                        repo.dirlog(dir).strip(striprev, tr)
+                        repo.manifest.dirlog(dir).strip(striprev, tr)
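                        # In a treemanifest repo every directory has its own
                        # manifest revlog under meta/<dir>/00manifest.i, so
                        # each dirlog must be stripped at the same revision
                        # as the root manifest.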
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
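            # Each strip above recorded a (file, offset) pair in tr.entries;
            # truncating every revlog file back to its recorded offset
            # physically discards the stripped revisions, and files truncated
            # to zero bytes are dropped from the store entirely.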

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(chgrpfile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(chgrpfile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()

def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

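                # A filelog's index lives in data/<f>.i; once the revlog
                # outgrows the inline threshold its data moves to a separate
                # data/<f>.d file, so both names may need fncache entries.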
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))

def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
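# A sketch of what the revset above selects (hypothetical history): with a
# bookmark 'feature' on its own side branch, it yields the commits reachable
# from 'feature' but from no other head and no other bookmark, i.e. exactly
# the branch that only 'feature' is keeping alive.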

def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
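
For reference, a minimal sketch of how an extension already holding a `ui` and a `repo` might invoke the strip helper defined in this file; `node` here is a hypothetical changeset identifier:

    from mercurial import repair

    # strip one changeset and all of its descendants, saving a backup
    # bundle under .hg/strip-backup/ (node is a hypothetical changeset id)
    repair.strip(ui, repo, [repo[node].node()], backup=True)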