##// END OF EJS Templates
repair: open a file with checkambig=True to avoid file stat ambiguity...
FUJIWARA Katsunori -
r30001:e38d85be default
parent child Browse files
Show More
@@ -1,358 +1,359 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 error,
20 20 exchange,
21 21 obsolete,
22 22 util,
23 23 )
24 24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """Write a backup bundle for the specified revisions; return its name."""
    version = changegroup.safeversion(repo)
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=version)

    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Hash every node covered by the bundle into the filename so that
    # distinct backups never collide.
    hexhashes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    # Pick the on-disk bundle format matching the changegroup version.
    if version != '01':
        bundletype = "HG20"
        comp = 'BZ' if compress else None
    else:
        comp = None
        bundletype = "HG10BZ" if compress else "HG10UN"
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
53 53
def _collectfiles(repo, striprev):
    """Return a sorted list of every file touched at or above striprev."""
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
62 62
63 63 def _collectbrokencsets(repo, files, striprev):
64 64 """return the changesets which will be broken by the truncation"""
65 65 s = set()
66 66 def collectone(revlog):
67 67 _, brokenset = revlog.getstrippoint(striprev)
68 68 s.update([revlog.linkrev(r) for r in brokenset])
69 69
70 70 collectone(repo.manifest)
71 71 for fname in files:
72 72 collectone(repo.file(fname))
73 73
74 74 return s
75 75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Remove the revisions in nodelist, and all their descendants, from repo.

    Unless ``backup`` is falsy, a backup bundle of the stripped revisions
    is written to .hg/strip-backup first.  Revisions above the strip point
    that are *not* being stripped are saved to a temporary bundle and
    re-applied after the revlogs are truncated.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument (older callers passed the strings 'none'/'strip').
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads of the revisions we must preserve
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing into the stripped set must be moved afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifest

    # stripping inside an existing transaction would corrupt its journal
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                    # otherwise
                for unencoded, encoded, size in repo.store.datafiles():
                    if (unencoded.startswith('meta/') and
                        unencoded.endswith('00manifest.i')):
                        dir = unencoded[5:-12]
                        repo.manifest.dirlog(dir).strip(striprev, tr)
            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate the revlog files back to the recorded offsets;
            # checkambig=True avoids file stat ambiguity on the rewrite
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        # re-apply the revisions we saved aside before truncating
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # move any bookmark that pointed into the stripped set
        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        lock = tr = None
        try:
            lock = repo.lock()
            tr = repo.transaction('repair')
            bm.recordchange(tr)
            tr.close()
        finally:
            tr.release()
            lock.release()

        # remove undo files: they reference pre-strip state
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
246 247
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Force the fncache to be loaded before we look at its entries.
        if 'irrelevant' in fnc:
            pass

        existing = set(fnc.entries)
        rebuilt = set()
        seen = set()

        total = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=total,
                        unit=_('changesets'))

            for path in repo[rev].files():
                # Skip paths already examined to minimize I/O.
                if path in seen:
                    continue
                seen.add(path)

                for entry in ('data/%s.i' % path, 'data/%s.d' % path):
                    if repo.store._exists(entry):
                        rebuilt.add(entry)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dirname in util.dirs(seen):
                for entry in ('meta/%s/00manifest.i' % dirname,
                              'meta/%s/00manifest.d' % dirname):
                    if repo.store._exists(entry):
                        rebuilt.add(entry)

        added = sorted(rebuilt - existing)
        removed = sorted(existing - rebuilt)
        for p in removed:
            ui.write(_('removing %s\n') % p)
        for p in added:
            ui.write(_('adding %s\n') % p)

        if added or removed:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (len(added), len(removed)))
            fnc.entries = rebuilt
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
318 319
def stripbmrevset(repo, mark):
    """Return the revset of revisions to strip for bookmark ``mark`` (-B).

    Needs to live here so extensions can use it and wrap it even when strip
    is not enabled or not present on a box.
    """
    revset = ("ancestors(bookmark(%s)) - "
              "ancestors(head() and not bookmark(%s)) - "
              "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revset, mark, mark, mark)
330 331
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Split the current markers into the kept remainder and a deletion count.
    kept = [m for i, m in enumerate(obsstore._all) if i not in indices]
    deleted = len(obsstore._all) - len(kept)

    # atomictemp gives us the atomic-rename-on-close guarantee above
    newfile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newfile.write(data)
    newfile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now