##// END OF EJS Templates
repair: mark the critical section of strip() as unsafe...
Augie Fackler -
r38546:6e0c66ef default
parent child Browse files
Show More
@@ -1,435 +1,436
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 util,
27 util,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 stringutil,
30 stringutil,
31 )
31 )
32
32
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written under the repository's "strip-backup" directory,
    which is created on demand. Returns the path of the written bundle
    (relative to repo.vfs).
    """
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).digest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(totalhash[:4]), suffix)

    # Pick a bundle type matching the changegroup version the repo speaks;
    # '01' changegroups use the legacy HG10* containers, anything newer
    # goes through bundle2 (HG20), optionally bzip2-compressed.
    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
68
68
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Returns a sorted list of every file touched by any changeset from
    ``striprev`` through the repository tip.
    """
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
77
77
78 def _collectrevlog(revlog, striprev):
78 def _collectrevlog(revlog, striprev):
79 _, brokenset = revlog.getstrippoint(striprev)
79 _, brokenset = revlog.getstrippoint(striprev)
80 return [revlog.linkrev(r) for r in brokenset]
80 return [revlog.linkrev(r) for r in brokenset]
81
81
def _collectmanifest(repo, striprev):
    """return the linkrevs of manifest entries broken by the strip"""
    return _collectrevlog(repo.manifestlog._revlog, striprev)
84
84
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation

    Combines the broken linkrevs from the manifest revlog and from every
    filelog in ``files`` into one set of changeset revisions.
    """
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
94
94
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the given nodes (and their descendants) from the repository

    Writes a backup bundle of everything stripped (unless ``backup`` is
    falsy), truncates the revlogs, then re-applies any non-descendant
    revisions that had to be temporarily bundled. Returns the backup file
    path, or None if no backup was made, so extensions can use it.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect the bookmarks that point into the stripped set so they can be
    # moved to newbmtarget afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    # The critical section: truncating the revlogs and restoring the saved
    # revisions must not be interrupted, or the repo is left broken.
    with ui.uninterruptable():
        try:
            with repo.transaction("strip") as tr:
                offset = len(tr.entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                for i in xrange(offset, len(tr.entries)):
                    file, troffset, ignore = tr.entries[i]
                    with repo.svfs(file, 'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_("adding branch\n"))
                f = vfs.open(tmpbundlefile, "rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                txnname = 'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=tmpbundleurl)
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            with repo.transaction('repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                bm.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(_('error removing %s: %s\n') %
                                (undovfs.join(undofile),
                                 stringutil.forcebytestr(e)))

        except: # re-raises
            if backupfile:
                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                        % vfs.join(backupfile))
            if tmpbundlefile:
                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                        % vfs.join(tmpbundlefile))
                ui.warn(_("(fix the problem, then recover the changesets with "
                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
265
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    Any requested node whose descendants are not all included in ``nodes``
    is excluded (with a warning) rather than stripped, so no revision is
    orphaned.
    """
    torev = repo.unfiltered().changelog.rev
    wanted = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        nodestr = ', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
280
281
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes to strip while a transaction is open, then performs a
    single safe strip once the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default backup topic when none was given
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue additional nodes to be stripped at transaction close"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # only the safe subset of the accumulated nodes is stripped
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
298
299
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip immediately
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    cb = tr.getpostclose('\xffstrip')
    if cb is None:
        cb = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', cb)
    if topic:
        cb.topic = topic
    cb.addnodes(nodelist)
322
def stripmanifest(repo, striprev, tr, files):
    """strip the root manifest revlog, then any tree manifest revlogs"""
    rootlog = repo.manifestlog._revlog
    rootlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
326
327
def striptrees(repo, tr, striprev, files):
    """strip per-directory manifest revlogs for treemanifest repositories

    A no-op on repositories without the 'treemanifest' requirement.
    """
    if 'treemanifest' in repo.requirements: # safe but unnecessary
                                            # otherwise
        for unencoded, encoded, size in repo.store.datafiles():
            if (unencoded.startswith('meta/') and
                unencoded.endswith('00manifest.i')):
                # path is "meta/<dirname>/00manifest.i"; slice out <dirname>
                dirname = unencoded[5:-12]
                repo.manifestlog._revlog.dirlog(dirname).strip(striprev, tr)
335
336
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for fname in ctx.files():
                # This is to minimize I/O.
                if fname in seenfiles:
                    continue
                seenfiles.add(fname)

                indexfile = 'data/%s.i' % fname
                datafile = 'data/%s.d' % fname

                if repo.store._exists(indexfile):
                    newentries.add(indexfile)
                if repo.store._exists(datafile):
                    newentries.add(datafile)

        progress.complete()

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dirname in util.dirs(seenfiles):
                indexfile = 'meta/%s/00manifest.i' % dirname
                datafile = 'meta/%s/00manifest.d' % dirname

                if repo.store._exists(indexfile):
                    newentries.add(indexfile)
                if repo.store._exists(datafile):
                    newentries.add(datafile)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for path in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % path)
        for path in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % path)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
407
408
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # partition the existing markers into kept and deleted
    kept = []
    deleted = 0
    for i, marker in enumerate(obsstore._all):
        if i in indices:
            deleted += 1
            continue
        kept.append(marker)

    # atomictemp guarantees the obsstore is replaced atomically on close()
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    # NOTE: loop variable renamed from 'bytes' to avoid shadowing the builtin
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now