strip: respect the backup option in stripcallback...
Jun Wu
r33108:208de153 default
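This change makes stripcallback.__call__ forward the backup flag passed to its constructor instead of hard-coding True (the one-line diff at line 288 below). A minimal sketch of the effect, assuming an extension-side caller: ui, repo, nodes and tr are placeholders; the other names come from repair.py as shown below.

    from mercurial import repair

    # An extension rewriting history that does not want a strip-backup
    # bundle can construct the callback with backup=False.
    callback = repair.stripcallback(ui, repo, backup=False, topic='rewrite')
    callback.addnodes(nodes)
    # Before this change __call__ passed True unconditionally, so a backup
    # bundle was written anyway; now self.backup is respected.
    callback(tr)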
@@ -1,433 +1,433 @@
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import short
16 16 from . import (
17 17 bundle2,
18 18 changegroup,
19 19 discovery,
20 20 error,
21 21 exchange,
22 22 obsolete,
23 23 util,
24 24 )
25 25
26 26 def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
27 27 """create a bundle with the specified revisions as a backup"""
28 28
29 29 backupdir = "strip-backup"
30 30 vfs = repo.vfs
31 31 if not vfs.isdir(backupdir):
32 32 vfs.mkdir(backupdir)
33 33
34 34 # Include a hash of all the nodes in the filename for uniqueness
35 35 allcommits = repo.set('%ln::%ln', bases, heads)
36 36 allhashes = sorted(c.hex() for c in allcommits)
37 37 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
38 38 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
39 39
40 40 cgversion = changegroup.safeversion(repo)
41 41 comp = None
42 42 if cgversion != '01':
43 43 bundletype = "HG20"
44 44 if compress:
45 45 comp = 'BZ'
46 46 elif compress:
47 47 bundletype = "HG10BZ"
48 48 else:
49 49 bundletype = "HG10UN"
50 50
51 51 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
52 52 contentopts = {
53 53 'cg.version': cgversion,
54 54 'obsolescence': obsolescence,
55 55 'phases': True,
56 56 }
57 57 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
58 58 outgoing, contentopts, vfs, compression=comp)
59 59
60 60 def _collectfiles(repo, striprev):
61 61 """find out the filelogs affected by the strip"""
62 62 files = set()
63 63
64 64 for x in xrange(striprev, len(repo)):
65 65 files.update(repo[x].files())
66 66
67 67 return sorted(files)
68 68
69 69 def _collectbrokencsets(repo, files, striprev):
70 70 """return the changesets which will be broken by the truncation"""
71 71 s = set()
72 72 def collectone(revlog):
73 73 _, brokenset = revlog.getstrippoint(striprev)
74 74 s.update([revlog.linkrev(r) for r in brokenset])
75 75
76 76 collectone(repo.manifestlog._revlog)
77 77 for fname in files:
78 78 collectone(repo.file(fname))
79 79
80 80 return s
81 81
82 82 def strip(ui, repo, nodelist, backup=True, topic='backup'):
83 83 # This function requires the caller to lock the repo, but it operates
84 84 # within a transaction of its own, and thus requires there to be no current
85 85 # transaction when it is called.
86 86 if repo.currenttransaction() is not None:
87 87 raise error.ProgrammingError('cannot strip from inside a transaction')
88 88
89 89 # Simple way to maintain backwards compatibility for this
90 90 # argument.
91 91 if backup in ['none', 'strip']:
92 92 backup = False
93 93
94 94 repo = repo.unfiltered()
95 95 repo.destroying()
96 96
97 97 cl = repo.changelog
98 98 # TODO handle undo of merge sets
99 99 if isinstance(nodelist, str):
100 100 nodelist = [nodelist]
101 101 striplist = [cl.rev(node) for node in nodelist]
102 102 striprev = min(striplist)
103 103
104 104 files = _collectfiles(repo, striprev)
105 105 saverevs = _collectbrokencsets(repo, files, striprev)
106 106
107 107 # Some revisions with rev > striprev may not be descendants of striprev.
108 108 # We have to find these revisions and put them in a bundle, so that
109 109 # we can restore them after the truncations.
110 110 # To create the bundle we use repo.changegroupsubset which requires
111 111 # the list of heads and bases of the set of interesting revisions.
112 112 # (head = revision in the set that has no descendant in the set;
113 113 # base = revision in the set that has no ancestor in the set)
114 114 tostrip = set(striplist)
115 115 saveheads = set(saverevs)
116 116 for r in cl.revs(start=striprev + 1):
117 117 if any(p in tostrip for p in cl.parentrevs(r)):
118 118 tostrip.add(r)
119 119
120 120 if r not in tostrip:
121 121 saverevs.add(r)
122 122 saveheads.difference_update(cl.parentrevs(r))
123 123 saveheads.add(r)
124 124 saveheads = [cl.node(r) for r in saveheads]
125 125
126 126 # compute base nodes
127 127 if saverevs:
128 128 descendants = set(cl.descendants(saverevs))
129 129 saverevs.difference_update(descendants)
130 130 savebases = [cl.node(r) for r in saverevs]
131 131 stripbases = [cl.node(r) for r in tostrip]
132 132
133 133 stripobsidx = obsmarkers = ()
134 134 if repo.ui.configbool('devel', 'strip-obsmarkers', True):
135 135 obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
136 136 if obsmarkers:
137 137 stripobsidx = [i for i, m in enumerate(repo.obsstore)
138 138 if m in obsmarkers]
139 139
140 140 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
141 141 # is much faster
142 142 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
143 143 if newbmtarget:
144 144 newbmtarget = repo[newbmtarget.first()].node()
145 145 else:
146 146 newbmtarget = '.'
147 147
148 148 bm = repo._bookmarks
149 149 updatebm = []
150 150 for m in bm:
151 151 rev = repo[bm[m]].rev()
152 152 if rev in tostrip:
153 153 updatebm.append(m)
154 154
155 155 # create a changegroup for all the branches we need to keep
156 156 backupfile = None
157 157 vfs = repo.vfs
158 158 node = nodelist[-1]
159 159 if backup:
160 160 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
161 161 repo.ui.status(_("saved backup bundle to %s\n") %
162 162 vfs.join(backupfile))
163 163 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
164 164 vfs.join(backupfile))
165 165 tmpbundlefile = None
166 166 if saveheads:
167 167 # do not compress temporary bundle if we remove it from disk later
168 168 #
169 169 # We do not include obsolescence, it might re-introduce prune markers
170 170 # we are trying to strip. This is harmless since the stripped markers
171 171 # are already backed up and we did not touched the markers for the
172 172 # saved changesets.
173 173 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
174 174 compress=False, obsolescence=False)
175 175
176 176 mfst = repo.manifestlog._revlog
177 177
178 178 try:
179 179 with repo.transaction("strip") as tr:
180 180 offset = len(tr.entries)
181 181
182 182 tr.startgroup()
183 183 cl.strip(striprev, tr)
184 184 mfst.strip(striprev, tr)
185 185 striptrees(repo, tr, striprev, files)
186 186
187 187 for fn in files:
188 188 repo.file(fn).strip(striprev, tr)
189 189 tr.endgroup()
190 190
191 191 for i in xrange(offset, len(tr.entries)):
192 192 file, troffset, ignore = tr.entries[i]
193 193 with repo.svfs(file, 'a', checkambig=True) as fp:
194 194 fp.truncate(troffset)
195 195 if troffset == 0:
196 196 repo.store.markremoved(file)
197 197
198 198 deleteobsmarkers(repo.obsstore, stripobsidx)
199 199 del repo.obsstore
200 200
201 201 repo._phasecache.filterunknown(repo)
202 202 if tmpbundlefile:
203 203 ui.note(_("adding branch\n"))
204 204 f = vfs.open(tmpbundlefile, "rb")
205 205 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
206 206 if not repo.ui.verbose:
207 207 # silence internal shuffling chatter
208 208 repo.ui.pushbuffer()
209 209 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
210 210 txnname = 'strip'
211 211 if not isinstance(gen, bundle2.unbundle20):
212 212 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
213 213 with repo.transaction(txnname) as tr:
214 214 bundle2.applybundle(repo, gen, tr, source='strip',
215 215 url=tmpbundleurl, emptyok=True)
216 216 if not repo.ui.verbose:
217 217 repo.ui.popbuffer()
218 218 f.close()
219 219 repo._phasecache.invalidate()
220 220
221 221 for m in updatebm:
222 222 bm[m] = repo[newbmtarget].node()
223 223
224 224 with repo.transaction('repair') as tr:
225 225 bm.recordchange(tr)
226 226
227 227 # remove undo files
228 228 for undovfs, undofile in repo.undofiles():
229 229 try:
230 230 undovfs.unlink(undofile)
231 231 except OSError as e:
232 232 if e.errno != errno.ENOENT:
233 233 ui.warn(_('error removing %s: %s\n') %
234 234 (undovfs.join(undofile), str(e)))
235 235
236 236 except: # re-raises
237 237 if backupfile:
238 238 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
239 239 % vfs.join(backupfile))
240 240 if tmpbundlefile:
241 241 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
242 242 % vfs.join(tmpbundlefile))
243 243 ui.warn(_("(fix the problem, then recover the changesets with "
244 244 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
245 245 raise
246 246 else:
247 247 if tmpbundlefile:
248 248 # Remove temporary bundle only if there were no exceptions
249 249 vfs.unlink(tmpbundlefile)
250 250
251 251 repo.destroyed()
252 252 # return the backup file path (or None if 'backup' was False) so
253 253 # extensions can use it
254 254 return backupfile
255 255
256 256 def safestriproots(ui, repo, nodes):
257 257 """return list of roots of nodes where descendants are covered by nodes"""
258 258 torev = repo.unfiltered().changelog.rev
259 259 revs = set(torev(n) for n in nodes)
260 260 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
261 261 # orphaned = affected - wanted
262 262 # affected = descendants(roots(wanted))
263 263 # wanted = revs
264 264 tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
265 265 notstrip = revs - tostrip
266 266 if notstrip:
267 267 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
268 268 ui.warn(_('warning: orphaned descendants detected, '
269 269 'not stripping %s\n') % nodestr)
270 270 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
271 271
272 272 class stripcallback(object):
273 273 """used as a transaction postclose callback"""
274 274
275 275 def __init__(self, ui, repo, backup, topic):
276 276 self.ui = ui
277 277 self.repo = repo
278 278 self.backup = backup
279 279 self.topic = topic or 'backup'
280 280 self.nodelist = []
281 281
282 282 def addnodes(self, nodes):
283 283 self.nodelist.extend(nodes)
284 284
285 285 def __call__(self, tr):
286 286 roots = safestriproots(self.ui, self.repo, self.nodelist)
287 287 if roots:
288 strip(self.ui, self.repo, roots, True, self.topic)
288 strip(self.ui, self.repo, roots, self.backup, self.topic)
289 289
290 290 def delayedstrip(ui, repo, nodelist, topic=None):
291 291 """like strip, but works inside transaction and won't strip irreverent revs
292 292
293 293 nodelist must explicitly contain all descendants. Otherwise a warning will
294 294 be printed that some nodes are not stripped.
295 295
296 296 Always do a backup. The last non-None "topic" will be used as the backup
297 297 topic name. The default backup topic name is "backup".
298 298 """
299 299 tr = repo.currenttransaction()
300 300 if not tr:
301 301 nodes = safestriproots(ui, repo, nodelist)
302 302 return strip(ui, repo, nodes, True, topic)
303 303 # transaction postclose callbacks are called in alphabetical order.
304 304 # use '\xff' as prefix so we are likely to be called last.
305 305 callback = tr.getpostclose('\xffstrip')
306 306 if callback is None:
307 307 callback = stripcallback(ui, repo, True, topic)
308 308 tr.addpostclose('\xffstrip', callback)
309 309 if topic:
310 310 callback.topic = topic
311 311 callback.addnodes(nodelist)
312 312
313 313 def striptrees(repo, tr, striprev, files):
314 314 if 'treemanifest' in repo.requirements: # safe but unnecessary
315 315 # otherwise
316 316 for unencoded, encoded, size in repo.store.datafiles():
317 317 if (unencoded.startswith('meta/') and
318 318 unencoded.endswith('00manifest.i')):
319 319 dir = unencoded[5:-12]
320 320 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
321 321
322 322 def rebuildfncache(ui, repo):
323 323 """Rebuilds the fncache file from repo history.
324 324
325 325 Missing entries will be added. Extra entries will be removed.
326 326 """
327 327 repo = repo.unfiltered()
328 328
329 329 if 'fncache' not in repo.requirements:
330 330 ui.warn(_('(not rebuilding fncache because repository does not '
331 331 'support fncache)\n'))
332 332 return
333 333
334 334 with repo.lock():
335 335 fnc = repo.store.fncache
336 336 # Trigger load of fncache.
337 337 if 'irrelevant' in fnc:
338 338 pass
339 339
340 340 oldentries = set(fnc.entries)
341 341 newentries = set()
342 342 seenfiles = set()
343 343
344 344 repolen = len(repo)
345 345 for rev in repo:
346 346 ui.progress(_('rebuilding'), rev, total=repolen,
347 347 unit=_('changesets'))
348 348
349 349 ctx = repo[rev]
350 350 for f in ctx.files():
351 351 # This is to minimize I/O.
352 352 if f in seenfiles:
353 353 continue
354 354 seenfiles.add(f)
355 355
356 356 i = 'data/%s.i' % f
357 357 d = 'data/%s.d' % f
358 358
359 359 if repo.store._exists(i):
360 360 newentries.add(i)
361 361 if repo.store._exists(d):
362 362 newentries.add(d)
363 363
364 364 ui.progress(_('rebuilding'), None)
365 365
366 366 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
367 367 for dir in util.dirs(seenfiles):
368 368 i = 'meta/%s/00manifest.i' % dir
369 369 d = 'meta/%s/00manifest.d' % dir
370 370
371 371 if repo.store._exists(i):
372 372 newentries.add(i)
373 373 if repo.store._exists(d):
374 374 newentries.add(d)
375 375
376 376 addcount = len(newentries - oldentries)
377 377 removecount = len(oldentries - newentries)
378 378 for p in sorted(oldentries - newentries):
379 379 ui.write(_('removing %s\n') % p)
380 380 for p in sorted(newentries - oldentries):
381 381 ui.write(_('adding %s\n') % p)
382 382
383 383 if addcount or removecount:
384 384 ui.write(_('%d items added, %d removed from fncache\n') %
385 385 (addcount, removecount))
386 386 fnc.entries = newentries
387 387 fnc._dirty = True
388 388
389 389 with repo.transaction('fncache') as tr:
390 390 fnc.write(tr)
391 391 else:
392 392 ui.write(_('fncache already up to date\n'))
393 393
394 394 def stripbmrevset(repo, mark):
395 395 """
396 396 The revset to strip when strip is called with -B mark
397 397
398 398 Needs to live here so extensions can use it and wrap it even when strip is
399 399 not enabled or not present on a box.
400 400 """
401 401 return repo.revs("ancestors(bookmark(%s)) - "
402 402 "ancestors(head() and not bookmark(%s)) - "
403 403 "ancestors(bookmark() and not bookmark(%s))",
404 404 mark, mark, mark)
405 405
406 406 def deleteobsmarkers(obsstore, indices):
407 407 """Delete some obsmarkers from obsstore and return how many were deleted
408 408
409 409 'indices' is a list of ints which are the indices
410 410 of the markers to be deleted.
411 411
412 412 Every invocation of this function completely rewrites the obsstore file,
412 412 skipping the markers we want removed. A new temporary file is
414 414 created, remaining markers are written there and on .close() this file
415 415 gets atomically renamed to obsstore, thus guaranteeing consistency."""
416 416 if not indices:
417 417 # we don't want to rewrite the obsstore with the same content
418 418 return
419 419
420 420 left = []
421 421 current = obsstore._all
422 422 n = 0
423 423 for i, m in enumerate(current):
424 424 if i in indices:
425 425 n += 1
426 426 continue
427 427 left.append(m)
428 428
429 429 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
430 430 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
431 431 newobsstorefile.write(bytes)
432 432 newobsstorefile.close()
433 433 return n
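For reference, a minimal sketch (hypothetical extension code, not part of this change) of how delayedstrip is used from inside an open transaction: the nodes are only queued on the '\xffstrip' postclose callback, and the actual strip runs after the transaction closes.

    from mercurial import repair

    def pruneandstrip(ui, repo, nodes):
        # hypothetical helper: rewrite history inside one transaction and
        # let repair.delayedstrip schedule the strip for after it closes
        with repo.transaction('prune') as tr:
            # ... create replacement commits here ...
            repair.delayedstrip(ui, repo, nodes, topic='prune')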