##// END OF EJS Templates
strip: respect the backup option in stripcallback...
Jun Wu -
r33108:208de153 default
parent child Browse files
Show More
@@ -1,433 +1,433 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 discovery,
19 discovery,
20 error,
20 error,
21 exchange,
21 exchange,
22 obsolete,
22 obsolete,
23 util,
23 util,
24 )
24 )
25
25
def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """create a bundle with the specified revisions as a backup"""

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Hash every node going into the bundle so the filename is unique for
    # this particular set of revisions.
    commits = repo.set('%ln::%ln', bases, heads)
    hexes = sorted(ctx.hex() for ctx in commits)
    digest = hashlib.sha1(''.join(hexes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion == '01':
        # legacy changegroup: compression is baked into the bundle type
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            comp = 'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
59
59
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
68
68
69 def _collectbrokencsets(repo, files, striprev):
69 def _collectbrokencsets(repo, files, striprev):
70 """return the changesets which will be broken by the truncation"""
70 """return the changesets which will be broken by the truncation"""
71 s = set()
71 s = set()
72 def collectone(revlog):
72 def collectone(revlog):
73 _, brokenset = revlog.getstrippoint(striprev)
73 _, brokenset = revlog.getstrippoint(striprev)
74 s.update([revlog.linkrev(r) for r in brokenset])
74 s.update([revlog.linkrev(r) for r in brokenset])
75
75
76 collectone(repo.manifestlog._revlog)
76 collectone(repo.manifestlog._revlog)
77 for fname in files:
77 for fname in files:
78 collectone(repo.file(fname))
78 collectone(repo.file(fname))
79
79
80 return s
80 return s
81
81
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the requested nodes (and their descendants) from the repository

    The caller must hold the repo lock, and no transaction may be running:
    this function opens transactions of its own and raises ProgrammingError
    otherwise.

    nodelist may be a single node (str) or a list of nodes.  backup=False
    (or the legacy string values 'none'/'strip') skips writing the backup
    bundle; topic names the backup file.  Revisions above the strip point
    that are NOT descendants of the stripped set are saved to a temporary
    bundle and re-applied after truncation.

    Returns the backup bundle path, or None when no backup was written, so
    extensions can use it.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # any rev with a stripped parent must be stripped too
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers', True):
        obsmarkers = obsolete.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        # indices into the obsstore of the markers to be deleted
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect bookmarks that point into the stripped set; they are moved to
    # newbmtarget after the strip succeeds
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touch the markers for the
        # saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    mfst = repo.manifestlog._revlog

    try:
        with repo.transaction("strip") as tr:
            # truncation journal: entries appended after this offset record
            # the (file, offset) pairs to truncate the store revlogs to
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            # force obsstore reload on next access
            del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            # re-apply the revisions that were saved from above the strip
            # point but were not descendants of the stripped set
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl, emptyok=True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.transaction('repair') as tr:
            bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
255
255
def safestriproots(ui, repo, nodes):
    """Return the roots of ``nodes`` whose descendants are all in ``nodes``.

    Requested revisions that are ancestors of orphaned descendants (nodes
    outside the requested set) are skipped with a warning, so stripping the
    result never removes changesets that were not asked for.
    """
    getrev = repo.unfiltered().changelog.rev
    wanted = set(getrev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        nodestr = ', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
271
271
class stripcallback(object):
    """A transaction postclose callback that strips accumulated nodes.

    Nodes are queued with addnodes() while the transaction runs; when the
    transaction closes, __call__ strips the safe roots of everything that
    was collected, honoring the requested backup flag and topic.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        if topic:
            self.topic = topic
        else:
            self.topic = 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes to be stripped when the transaction closes"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
289
289
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction running: strip immediately
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callbackname = '\xffstrip'
    callback = tr.getpostclose(callbackname)
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose(callbackname, callback)
    if topic:
        # last non-None topic wins
        callback.topic = topic
    callback.addnodes(nodelist)
312
312
def striptrees(repo, tr, striprev, files):
    """strip the per-directory manifest revlogs of a treemanifest repo"""
    if 'treemanifest' not in repo.requirements:
        # safe but unnecessary otherwise
        return
    for unencoded, encoded, size in repo.store.datafiles():
        if (unencoded.startswith('meta/') and
            unencoded.endswith('00manifest.i')):
            # drop "meta/" prefix and "/00manifest.i" suffix to get the dir
            dir = unencoded[5:-12]
            repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
321
321
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset's touched files and records which .i/.d store
    files actually exist; the fncache is rewritten (inside a transaction)
    only when the computed set differs from the current one.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        # (the membership test forces the lazy fncache to read its file;
        # the looked-up name is deliberately meaningless)
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # treemanifest repos also track per-directory manifest revlogs
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            # mark dirty so fnc.write actually persists the new entries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
393
393
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # ancestors of the bookmark, minus anything reachable from other heads
    # or other bookmarks, i.e. only the revs exclusive to this bookmark
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
405
405
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # atomictemp: the rewritten store replaces the old one only on close()
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now