strip: simplify some repeated conditions...
Martin von Zweigbergk
r29951:e7acbe53 default
@@ -1,354 +1,355 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 
 from .i18n import _
 from .node import short
 from . import (
     bundle2,
     changegroup,
     error,
     exchange,
     obsolete,
     util,
 )
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cgversion = changegroup.safeversion(repo)
 
     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
 
     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
                                compression=comp)
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])
 
     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))
 
     return s
 
 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function operates within a transaction of its own, but does
     # not take any lock on the repo.
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False
 
     repo = repo.unfiltered()
     repo.destroying()
 
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'
 
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
-    if saveheads or savebases:
+    chgrpfile = None
+    if saveheads:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)
 
     mfst = repo.manifest
 
     curtr = repo.currenttransaction()
     if curtr is not None:
         del curtr # avoid carrying reference to transaction for nothing
         msg = _('programming error: cannot strip from inside a transaction')
         raise error.Abort(msg, hint=_('contact your extension maintainer'))
 
     try:
         with repo.transaction("strip") as tr:
             offset = len(tr.entries)
 
             tr.startgroup()
             cl.strip(striprev, tr)
             mfst.strip(striprev, tr)
             if 'treemanifest' in repo.requirements: # safe but unnecessary
                                                     # otherwise
                 for unencoded, encoded, size in repo.store.datafiles():
                     if (unencoded.startswith('meta/') and
                         unencoded.endswith('00manifest.i')):
                         dir = unencoded[5:-12]
                         repo.manifest.dirlog(dir).strip(striprev, tr)
             for fn in files:
                 repo.file(fn).strip(striprev, tr)
             tr.endgroup()
 
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.svfs(file, 'a').truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)
 
-        if saveheads or savebases:
+        if chgrpfile:
             ui.note(_("adding branch\n"))
             f = vfs.open(chgrpfile, "rb")
             gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 with repo.transaction('strip') as tr:
                     tr.hookargs = {'source': 'strip',
                                    'url': 'bundle:' + vfs.join(chgrpfile)}
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url='bundle:' + vfs.join(chgrpfile))
             else:
                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
         repo._phasecache.invalidate()
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         lock = tr = None
         try:
             lock = repo.lock()
             tr = repo.transaction('repair')
             bm.recordchange(tr)
             tr.close()
         finally:
             tr.release()
             lock.release()
 
         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError as e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))
 
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % vfs.join(backupfile))
-        elif saveheads:
+        elif chgrpfile:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % vfs.join(chgrpfile))
         raise
     else:
-        if saveheads or savebases:
+        if chgrpfile:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)
 
     repo.destroyed()
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
 
     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()
 
     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return
 
     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass
 
         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()
 
         repolen = len(repo)
         for rev in repo:
             ui.progress(_('rebuilding'), rev, total=repolen,
                         unit=_('changesets'))
 
             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)
 
                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         ui.progress(_('rebuilding'), None)
 
         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
             for dir in util.dirs(seenfiles):
                 i = 'meta/%s/00manifest.i' % dir
                 d = 'meta/%s/00manifest.d' % dir
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)
 
         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True
 
             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))
 
 def stripbmrevset(repo, mark):
     """
     The revset to strip when strip is called with -B mark
 
     Needs to live here so extensions can use it and wrap it even when strip is
     not enabled or not present on a box.
     """
     return repo.revs("ancestors(bookmark(%s)) - "
                      "ancestors(head() and not bookmark(%s)) - "
                      "ancestors(bookmark() and not bookmark(%s))",
                      mark, mark, mark)
 
 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted
 
     'indices' is a list of ints which are the indices
     of the markers to be deleted.
 
     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return
 
     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)
 
     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n