repair: open a file with checkambig=True to avoid file stat ambiguity...
FUJIWARA Katsunori
r30001:e38d85be default
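Background on the fix: Mercurial validates several of its in-memory caches against a file's stat data, typically (size, mtime). Since mtime commonly has one-second granularity, a file rewritten to the same size within the same second as the cached stat is indistinguishable from the unchanged file, and readers keep serving stale content; this is the "file stat ambiguity" the commit message refers to. Opening the truncated revlog files with checkambig=True asks the vfs layer to detect that case and adjust the mtime. The sketch below only illustrates the failure mode; the names (cachedread, f.txt) are hypothetical and this is not Mercurial code.

import os

def cachedread(path, cache):
    # Illustrative cache validated by (size, whole-second mtime) --
    # the validation scheme that stat ambiguity defeats.
    st = os.stat(path)
    key = (st.st_size, int(st.st_mtime))
    if cache.get('key') == key:
        return cache['data']  # stale if the file changed ambiguously
    with open(path, 'rb') as fp:
        cache['data'] = fp.read()
    cache['key'] = key
    return cache['data']

cache = {}
with open('f.txt', 'wb') as fp:
    fp.write(b'aaaa')
assert cachedread('f.txt', cache) == b'aaaa'

# Rewrite with the same size, within the same second...
with open('f.txt', 'wb') as fp:
    fp.write(b'bbbb')

# ...and the cache key still matches, so the stale bytes win:
print(cachedread('f.txt', cache))  # usually b'aaaa' -- the bug checkambig avoids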
@@ -1,358 +1,359 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib

 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
     error,
     exchange,
     obsolete,
     util,
 )

 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = changegroup.safeversion(repo)

     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                                compression=comp)

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])

     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))

     return s

 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function operates within a transaction of its own, but does
     # not take any lock on the repo.
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()

     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'

     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)

     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                 compress=False)

     mfst = repo.manifest

     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))

     try:
         with repo.transaction("strip") as tr:
             offset = len(tr.entries)

             tr.startgroup()
             cl.strip(striprev, tr)
             mfst.strip(striprev, tr)
             if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                     # otherwise
                 for unencoded, encoded, size in repo.store.datafiles():
                     if (unencoded.startswith('meta/') and
                         unencoded.endswith('00manifest.i')):
                         dir = unencoded[5:-12]
                         repo.manifest.dirlog(dir).strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()

             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
-                repo.svfs(file, 'a').truncate(troffset)
+                with repo.svfs(file, 'a', checkambig=True) as fp:
+                    fp.truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)

         if tmpbundlefile:
             ui.note(_("adding branch\n"))
             f = vfs.open(tmpbundlefile, "rb")
             gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction('strip') as tr:
                     tr.hookargs = {'source': 'strip',
                                    'url': 'bundle:' + vfs.join(tmpbundlefile)}
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url='bundle:' + vfs.join(tmpbundlefile))
             else:
                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                           True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
         repo._phasecache.invalidate()

         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('repair')
             bm.recordchange(tr)
             tr.close()
         finally:
             tr.release()
             lock.release()

         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))

     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                     % vfs.join(backupfile))
         if tmpbundlefile:
             ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                     % vfs.join(tmpbundlefile))
             ui.warn(_("(fix the problem, then recover the changesets with "
                       "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
         raise
     else:
         if tmpbundlefile:
             # Remove temporary bundle only if there were no exceptions
             vfs.unlink(tmpbundlefile)

     repo.destroyed()

 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.

     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()

     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return

     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass

         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()

         repolen = len(repo)
         for rev in repo:
             ui.progress(_('rebuilding'), rev, total=repolen,
                         unit=_('changesets'))

             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)

                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         ui.progress(_('rebuilding'), None)

         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
             for dir in util.dirs(seenfiles):
                 i = 'meta/%s/00manifest.i' % dir
                 d = 'meta/%s/00manifest.d' % dir

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)

         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True

             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))

 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark

     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)

 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted

     'indices' is a list of ints which are the indices
     of the markers to be deleted.

     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return

     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)

     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
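A note on the shape of the change: the old line created the file object, truncated it, and left closing the handle to garbage collection. The new with block closes it deterministically, which matters because, to our understanding, the ambiguity fixup runs when a checkambig-opened file is closed: the vfs compares the stat taken before the write against the stat after close and, if they are indistinguishable, advances the mtime. A rough sketch of that fixup step, using a hypothetical helper name (avoidambig) rather than Mercurial's actual internals:

import os

def avoidambig(path, oldstat):
    # If `path` now has the same size and whole-second mtime as
    # `oldstat`, advance its mtime by one second so stat-based cache
    # validation sees a change. Illustrative sketch only; Mercurial's
    # real logic lives in its util/vfs layers behind checkambig=True.
    newstat = os.stat(path)
    if (newstat.st_size == oldstat.st_size and
            int(newstat.st_mtime) == int(oldstat.st_mtime)):
        # Mask keeps the timestamp within a safe 32-bit range.
        advanced = (int(newstat.st_mtime) + 1) & 0x7fffffff
        os.utime(path, (newstat.st_atime, advanced))

Advancing the mtime is cheaper than rewriting the file and is enough to invalidate any cache keyed on (size, mtime).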