##// END OF EJS Templates
strip: move tree strip logic to its own function...
Durham Goode -
r32196:a2be2abe default
parent child Browse files
Show More
@@ -1,354 +1,358 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 error,
19 error,
20 exchange,
20 exchange,
21 obsolete,
21 obsolete,
22 util,
22 util,
23 )
23 )
24
24
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cgversion = changegroup.safeversion(repo)

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Changegroup version 01 only fits in the legacy HG10 containers, where
    # the compression scheme is baked into the bundle type name; any newer
    # version is wrapped in an HG20 bundle with an explicit compression
    # engine instead.
    if cgversion != '01':
        bundletype = "HG20"
        comp = 'BZ' if compress else None
    else:
        bundletype = "HG10BZ" if compress else "HG10UN"
        comp = None
    return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                               compression=comp)
53
53
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union of the file lists of every changeset at or above the strip
    # point; these are the filelogs that will need truncating.
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())

    return sorted(touched)
62
62
63 def _collectbrokencsets(repo, files, striprev):
63 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
64 """return the changesets which will be broken by the truncation"""
65 s = set()
65 s = set()
66 def collectone(revlog):
66 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
67 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
68 s.update([revlog.linkrev(r) for r in brokenset])
69
69
70 collectone(repo.manifestlog._revlog)
70 collectone(repo.manifestlog._revlog)
71 for fname in files:
71 for fname in files:
72 collectone(repo.file(fname))
72 collectone(repo.file(fname))
73
73
74 return s
74 return s
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the revisions in nodelist (and their descendants) from repo.

    Saves the stripped revisions into backup bundles so they can be
    restored, truncates the changelog/manifest/filelogs, then re-applies
    any revisions above the strip point that were not themselves being
    stripped. Returns the backup bundle path (or None when no backup was
    requested) so extensions can use it.
    """
    # This function operates within a transaction of its own, but does
    # not take any lock on the repo.
    # Simple way to maintain backwards compatibility for this
    # argument: older callers passed 'none'/'strip' strings here.
    if backup in ['none', 'strip']:
        backup = False

    # Strip works on the unfiltered repo: hidden revisions must be
    # truncated too.
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # Everything at or above the lowest requested revision is affected.
    striprev = min(striplist)

    # Filelogs touched above the strip point, and the changesets whose
    # filelog/manifest entries would be broken by truncating there.
    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # Any descendant of a stripped revision is stripped as well.
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            # r survives the strip: track it and maintain the head set
            # (a revision stops being a head once a child shows up).
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes: drop every saved rev that has a saved ancestor,
    # leaving only the roots of the saved set.
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        # Nothing survives below the strip point; retarget bookmarks to
        # the working directory parent.
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set must be moved afterwards.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False)

    mfst = repo.manifestlog._revlog

    # Strip manages its own transactions; running inside a caller's
    # transaction would corrupt the truncation bookkeeping below.
    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        raise error.ProgrammingError('cannot strip from inside a transaction')

    try:
        with repo.transaction("strip") as tr:
            # Remember where this transaction's journal starts so we can
            # replay only the entries added by the strip itself.
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Physically truncate the revlog files to the offsets the
            # strip recorded in the transaction journal.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        if tmpbundlefile:
            # Re-apply the saved revisions that were above the strip
            # point but not part of the stripped set.
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                with repo.transaction('strip') as tr:
                    tr.hookargs = {'source': 'strip',
                                   'url': 'bundle:' + vfs.join(tmpbundlefile)}
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url='bundle:' + vfs.join(tmpbundlefile))
            else:
                gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
                          True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        # Phase data may reference stripped revisions; force a reload.
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.lock():
            with repo.transaction('repair') as tr:
                bm.recordchange(tr)

        # remove undo files: rolling back past a strip would corrupt the
        # repository, so the undo data must not survive.
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
242
237
def striptrees(repo, tr, striprev, files):
    """Strip the directory (tree) manifest revlogs down to striprev.

    This is a no-op on repositories without tree manifests (running it
    anyway would be safe, just unnecessary work).
    """
    if 'treemanifest' not in repo.requirements:
        return
    for unencoded, encoded, size in repo.store.datafiles():
        # Directory manifest revlogs live at meta/<dir>/00manifest.i.
        if not unencoded.startswith('meta/'):
            continue
        if not unencoded.endswith('00manifest.i'):
            continue
        treedir = unencoded[5:-12]
        repo.manifestlog._revlog.dirlog(treedir).strip(striprev, tr)
246
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    # Operate on the unfiltered repo: the fncache must cover hidden
    # revisions too.
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache: the membership test forces the lazy
        # loader to run so fnc.entries below is populated.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset and record which filelog files actually
        # exist in the store for the files each changeset touched.
        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                # Index and data file paths for this filelog.
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        # Tree manifest repos also keep per-directory manifest revlogs
        # under meta/, which belong in the fncache as well.
        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            # Mark dirty so fnc.write actually persists the new contents.
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
314
318
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # Ancestors of the bookmark that are reachable from no other head or
    # bookmark, i.e. the part of history only this bookmark keeps alive.
    spec = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(spec, mark, mark, mark)
326
330
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition the current markers into those we keep and a count of
    # those we drop.
    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # atomictemp gives us an all-or-nothing replacement of the obsstore.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now