##// END OF EJS Templates
repair: refactor broken linkrev collection...
Durham Goode -
r33690:6626d12e default
parent child Browse files
Show More
@@ -1,433 +1,437 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 discovery,
19 discovery,
20 error,
20 error,
21 exchange,
21 exchange,
22 obsolete,
22 obsolete,
23 obsutil,
23 obsutil,
24 util,
24 util,
25 )
25 )
26
26
def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """create a bundle with the specified revisions as a backup"""

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Pick the bundle container/compression based on the changegroup
    # version the repo can safely produce.
    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
60
60
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    # Every file touched by any changeset at or above striprev is affected.
    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
69
69
70 def _collectrevlog(revlog, striprev):
71 _, brokenset = revlog.getstrippoint(striprev)
72 return [revlog.linkrev(r) for r in brokenset]
73
def _collectmanifest(repo, striprev):
    """return the linkrevs of manifest entries broken by stripping striprev"""
    return _collectrevlog(repo.manifestlog._revlog, striprev)
76
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()

    # The manifest revlog plus every affected filelog contribute linkrevs
    # of changesets that would be left dangling by the strip.
    s.update(_collectmanifest(repo, striprev))
    for fname in files:
        s.update(_collectrevlog(repo.file(fname), striprev))

    return s
82
86
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """strip the given nodes (and their descendants) from the repository

    Returns the backup bundle file path, or None when 'backup' was False.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    mfst = repo.manifestlog._revlog

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate the revlog files that the transaction recorded as
            # touched; files truncated to zero are gone from the store.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        with repo.transaction('repair') as tr:
            bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
            bm.applychanges(repo, tr, bmchanges)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
255
259
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
    notstrip = revs - tostrip
    if notstrip:
        # Warn about requested revisions we refuse to strip because some
        # descendant outside the requested set would be orphaned.
        nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
271
275
class stripcallback(object):
    """used as a transaction postclose callback"""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # Fall back to the default topic when none was given.
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """accumulate more nodes to strip when the transaction closes"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # Reduce the accumulated nodes to safe roots and strip them.
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
289
293
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irreverent revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction in flight: strip immediately.
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
312
316
def striptrees(repo, tr, striprev, files):
    """strip per-directory manifest revlogs for tree-manifest repos"""
    if 'treemanifest' in repo.requirements: # safe but unnecessary
                                            # otherwise
        for unencoded, encoded, size in repo.store.datafiles():
            if (unencoded.startswith('meta/') and
                unencoded.endswith('00manifest.i')):
                # Drop "meta/" prefix and "/00manifest.i" suffix to get
                # the directory name.
                dir = unencoded[5:-12]
                repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
321
325
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
393
397
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
405
409
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    current = obsstore._all
    # Keep every marker whose index was not requested for deletion.
    left = [m for i, m in enumerate(current) if i not in indices]
    n = len(current) - len(left)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now