strip: simplify some repeated conditions...
Martin von Zweigbergk
r29951:e7acbe53 default
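
The patch below replaces the repeated condition `if saveheads or savebases:` with a single sentinel: `chgrpfile` starts as None, is assigned exactly when a partial bundle is written, and the three later use sites just test the sentinel. A minimal stand-alone sketch of that pattern; the names `make_bundle` and `apply_bundle` are illustrative, not from the patch:

    def restore_after_cleanup(saveheads, make_bundle, apply_bundle):
        # Before: 'if saveheads or savebases:' guarded both the creation
        # and every later use of the temporary bundle, so the compound
        # condition had to be kept in sync at each site.
        # After: record the outcome once, then test the sentinel.
        tmpfile = None
        if saveheads:
            tmpfile = make_bundle()
        # ... work that may or may not need the bundle ...
        if tmpfile:
            apply_bundle(tmpfile)
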
@@ -1,354 +1,355 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 
 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
     error,
     exchange,
     obsolete,
     util,
 )
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = changegroup.safeversion(repo)
 
     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
 
     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                                compression=comp)
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])
 
     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))
 
     return s
 
 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function operates within a transaction of its own, but does
     # not take any lock on the repo.
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False
 
     repo = repo.unfiltered()
     repo.destroying()
 
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'
 
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
-    if saveheads or savebases:
+    chgrpfile = None
+    if saveheads:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)
 
     mfst = repo.manifest
 
     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))
 
     try:
         with repo.transaction("strip") as tr:
             offset = len(tr.entries)
 
             tr.startgroup()
             cl.strip(striprev, tr)
             mfst.strip(striprev, tr)
             if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                     # otherwise
                 for unencoded, encoded, size in repo.store.datafiles():
                     if (unencoded.startswith('meta/') and
                         unencoded.endswith('00manifest.i')):
                         dir = unencoded[5:-12]
                         repo.manifest.dirlog(dir).strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()
 
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.svfs(file, 'a').truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)
 
-        if saveheads or savebases:
+        if chgrpfile:
             ui.note(_("adding branch\n"))
             f = vfs.open(chgrpfile, "rb")
             gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction('strip') as tr:
                     tr.hookargs = {'source': 'strip',
                                    'url': 'bundle:' + vfs.join(chgrpfile)}
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url='bundle:' + vfs.join(chgrpfile))
             else:
                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
         repo._phasecache.invalidate()
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('repair')
             bm.recordchange(tr)
             tr.close()
         finally:
             tr.release()
             lock.release()
 
         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))
 
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % vfs.join(backupfile))
-        elif saveheads:
+        elif chgrpfile:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % vfs.join(chgrpfile))
         raise
     else:
-        if saveheads or savebases:
+        if chgrpfile:
             # Remove partial backup only if there were no exceptions
             vfs.unlink(chgrpfile)
 
     repo.destroyed()
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
 
     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()
 
     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return
 
     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass
 
         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()
 
         repolen = len(repo)
         for rev in repo:
             ui.progress(_('rebuilding'), rev, total=repolen,
                         unit=_('changesets'))
 
             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)
 
                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         ui.progress(_('rebuilding'), None)
 
         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
             for dir in util.dirs(seenfiles):
                 i = 'meta/%s/00manifest.i' % dir
                 d = 'meta/%s/00manifest.d' % dir
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)
 
         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True
 
             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))
 
 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark
 
     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)
 
 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted
 
     'indices' is a list of ints which are the indices
     of the markers to be deleted.
 
     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return
 
     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)
 
     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
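
A note on the comment at file line 125 ("For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but is much faster"): both expressions pick the highest revision just outside the stripped set, which becomes the new bookmark target, but parents(s) only inspects the immediate parents of s while ::s walks every ancestor. A self-contained toy model of the identity, using plain dicts and sets rather than the Mercurial API:

    # Linear history 0..4; s = {3, 4} is the set being stripped.
    parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}
    s = {3, 4}

    def ancestors(revs):
        # All ancestors of revs, including revs themselves (revset "::s").
        seen = set()
        stack = list(revs)
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen

    parent_side = {p for r in s for p in parents[r]} - s          # parents(s) - s
    ancestor_side = ancestors(s) - s                              # ::s - s
    heads = {r for r in ancestor_side
             if not any(r in parents[c] for c in ancestor_side)}  # heads(::s - s)
    assert max(parent_side) == max(heads) == 2
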
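stripbmrevset builds the revset evaluated when strip is called with -B mark (its docstring keeps it here so extensions can wrap it): take the ancestors of the bookmarked head, then subtract anything still reachable from another head or from another bookmark, leaving only the changesets the bookmark exclusively owns. With mark = "feature" the query expands to:

    ancestors(bookmark("feature"))
      - ancestors(head() and not bookmark("feature"))
      - ancestors(bookmark() and not bookmark("feature"))
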
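deleteobsmarkers leans on svfs('obsstore', 'w', atomictemp=True): the surviving markers are written to a temporary file that only replaces the real obsstore when .close() succeeds, so a crash mid-write cannot leave a truncated store. A stand-alone sketch of that write-then-rename pattern using only the standard library (os/tempfile, not the vfs API):

    import os
    import tempfile

    def atomic_rewrite(path, chunks):
        """Rewrite path so readers never observe a partially written file."""
        # Write the new content to a temporary file in the same directory
        # (same filesystem, so the rename below cannot degrade to a copy).
        dirname = os.path.dirname(path) or '.'
        fd, tmppath = tempfile.mkstemp(dir=dirname)
        try:
            with os.fdopen(fd, 'wb') as tmp:
                for chunk in chunks:
                    tmp.write(chunk)
                tmp.flush()
                os.fsync(tmp.fileno())
            # os.replace() (Python 3) renames atomically, overwriting any
            # existing file; the old content stays intact until then.
            os.replace(tmppath, path)
        except BaseException:
            os.unlink(tmppath)
            raise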