##// END OF EJS Templates
repair: use progress helper...
Martin von Zweigbergk -
r38413:f0b0c853 default
parent child Browse files
Show More
@@ -1,435 +1,435
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 util,
27 util,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 stringutil,
30 stringutil,
31 )
31 )
32
32
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written under the repo's .hg/strip-backup/ directory and
    the vfs-relative file name is returned.
    """

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).digest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(totalhash[:4]), suffix)

    # Choose the on-disk bundle container for the changegroup version in use.
    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion == '01':
        # legacy changegroup: compression is baked into the bundle type
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            comp = 'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
68
68
69 def _collectfiles(repo, striprev):
69 def _collectfiles(repo, striprev):
70 """find out the filelogs affected by the strip"""
70 """find out the filelogs affected by the strip"""
71 files = set()
71 files = set()
72
72
73 for x in xrange(striprev, len(repo)):
73 for x in xrange(striprev, len(repo)):
74 files.update(repo[x].files())
74 files.update(repo[x].files())
75
75
76 return sorted(files)
76 return sorted(files)
77
77
78 def _collectrevlog(revlog, striprev):
78 def _collectrevlog(revlog, striprev):
79 _, brokenset = revlog.getstrippoint(striprev)
79 _, brokenset = revlog.getstrippoint(striprev)
80 return [revlog.linkrev(r) for r in brokenset]
80 return [revlog.linkrev(r) for r in brokenset]
81
81
def _collectmanifest(repo, striprev):
    """Return linkrevs of manifest revisions broken by stripping at striprev."""
    mfrevlog = repo.manifestlog._revlog
    return _collectrevlog(mfrevlog, striprev)
84
84
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    # manifest first, then every affected filelog
    broken.update(_collectmanifest(repo, striprev))
    for fname in files:
        filelog = repo.file(fname)
        broken.update(_collectrevlog(filelog, striprev))

    return broken
94
94
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the revisions in nodelist (and their descendants) from the repo

    A backup bundle of the stripped revisions is written to
    .hg/strip-backup/ unless 'backup' is falsy.  Revisions above the strip
    point that are NOT descendants of the stripped set are saved to a
    temporary bundle and re-applied after truncation.  Returns the backup
    file path (or None if 'backup' was False).
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument: old callers passed 'none'/'strip' strings instead of a bool.
    if backup in ['none', 'strip']:
        backup = False

    # operate on the unfiltered repo so hidden revisions are handled too
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # a revision is stripped if any of its parents is stripped
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            # r supersedes its parents as a head of the saved set
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes: drop saved revs that descend from other saved revs
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # obsolescence markers exclusive to the stripped revisions (only when the
    # devel option asks for them to be stripped as well)
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        # no surviving parent: move bookmarks to the working dir parent
        newbmtarget = '.'

    # collect bookmarks pointing at revisions being stripped
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    try:
        with repo.transaction("strip") as tr:
            # the transaction records which files it appends to; remember the
            # offset so only entries added by this strip are truncated below
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            stripmanifest(repo, striprev, tr, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # truncate the revlog files back to their pre-append size;
            # fully-emptied files are dropped from the store
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

        deleteobsmarkers(repo.obsstore, stripobsidx)
        # drop the cached obsstore so it is reloaded from the rewritten file
        del repo.obsstore
        repo.invalidatevolatilesets()
        repo._phasecache.filterunknown(repo)

        if tmpbundlefile:
            # re-apply the saved (non-stripped) revisions
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # relocate bookmarks that pointed at stripped revisions
        with repo.transaction('repair') as tr:
            bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
            bm.applychanges(repo, tr, bmchanges)

        # remove undo files (they reference the pre-strip state)
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile),
                             stringutil.forcebytestr(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
264
264
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        # warn about requested revisions we refuse to strip because doing so
        # would orphan descendants not listed in 'nodes'
        shortids = sorted(short(repo[r].node()) for r in skipped)
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % ', '.join(shortids))
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
280
280
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes via addnodes() while a transaction is open, then
    strips the safe roots of those nodes once the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none (or empty) was given
        self.topic = topic if topic else 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue additional nodes to strip at transaction close"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        ui, repo = self.ui, self.repo
        roots = safestriproots(ui, repo, self.nodelist)
        if not roots:
            return
        strip(ui, repo, roots, self.backup, self.topic)
298
298
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in progress: strip immediately
        safenodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, safenodes, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    key = '\xffstrip'
    callback = tr.getpostclose(key)
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose(key, callback)
    if topic:
        # later non-empty topics override earlier ones
        callback.topic = topic
    callback.addnodes(nodelist)
321
321
def stripmanifest(repo, striprev, tr, files):
    """Strip the root manifest revlog, then any per-directory tree manifests."""
    mfrevlog = repo.manifestlog._revlog
    mfrevlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
326
326
def striptrees(repo, tr, striprev, files):
    """Strip every per-directory tree-manifest revlog in the store."""
    if 'treemanifest' not in repo.requirements:
        # flat-manifest repo: nothing to do (running would be safe but
        # unnecessary)
        return
    for unencoded, encoded, size in repo.store.datafiles():
        if (unencoded.startswith('meta/') and
            unencoded.endswith('00manifest.i')):
            # drop the 'meta/' prefix and the '00manifest.i' suffix,
            # keeping the trailing slash of the directory name
            treedir = unencoded[5:-12]
            repo.manifestlog._revlog.dirlog(treedir).strip(striprev, tr)
335
335
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache: the membership test forces the lazy
        # cache to read its on-disk contents before we snapshot them.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                # a filelog has an index (.i) and possibly a data (.d) file
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # also track the per-directory tree-manifest revlogs
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            # mark dirty so fnc.write() actually persists the new contents
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
407
407
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # write the surviving markers to a temp file that atomically replaces
    # the obsstore on close
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now