##// END OF EJS Templates
repair: move manifest strip to a separate function...
Durham Goode -
r33691:86ea201e default
parent child Browse files
Show More
@@ -1,437 +1,439 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 discovery,
19 discovery,
20 error,
20 error,
21 exchange,
21 exchange,
22 obsolete,
22 obsolete,
23 obsutil,
23 obsutil,
24 util,
24 util,
25 )
25 )
26
26
def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """Write a backup bundle for the revisions between bases and heads.

    The bundle lands in the repo-local 'strip-backup' directory (created on
    demand).  Returns the bundle path relative to repo.vfs.
    """
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Fold a hash of every included node into the filename so repeated or
    # overlapping strips get unique backup files.
    allcommits = repo.set('%ln::%ln', bases, heads)
    hexnodes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(hexnodes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    # Pick the newest changegroup/bundle format the repo can safely use;
    # compression is only an option for the formats that support it.
    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
60
60
def _collectfiles(repo, striprev):
    """Return the sorted list of files touched by revisions >= striprev."""
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
69
69
def _collectrevlog(revlog, striprev):
    """Return linkrevs of the revlog entries broken by stripping striprev."""
    # getstrippoint() yields (strippoint, broken revision set); only the
    # broken set matters here.
    brokenset = revlog.getstrippoint(striprev)[1]
    return [revlog.linkrev(rev) for rev in brokenset]
73
73
def _collectmanifest(repo, striprev):
    """Return linkrevs of root-manifest entries broken by the strip."""
    mfrevlog = repo.manifestlog._revlog
    return _collectrevlog(mfrevlog, striprev)
76
76
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    # A changeset is broken if the strip truncates the manifest or any
    # filelog entry it links to.
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
86
86
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the given nodes and all their descendants from the repository.

    Revisions above the strip point that are *not* descendants of it are
    saved to a temporary bundle and re-applied after truncation.  Returns
    the backup bundle path, or None when no backup was made, so extensions
    can use it.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # Obsolescence markers exclusive to the stripped set are deleted too,
    # but only when the devel option below is enabled.
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set will be moved to newbmtarget.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            stripmanifest(repo, striprev, tr, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate each touched revlog file back to the offset recorded
            # in the transaction journal; files truncated to zero length are
            # removed from the store index entirely.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            # drop the cached obsstore so it is reloaded from disk next use
            del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            # Re-apply the saved non-descendant revisions from the temporary
            # bundle.
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        # Re-point bookmarks that referenced stripped changesets.
        with repo.transaction('repair') as tr:
            bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
            bm.applychanges(repo, tr, bmchanges)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
259
256
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    getrev = repo.unfiltered().changelog.rev
    wanted = {getrev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', wanted, wanted,
                            wanted))
    # Anything requested but unsafe to strip gets a warning instead.
    skipped = wanted - tostrip
    if skipped:
        nodestr = ', '.join(sorted(short(repo[n].node()) for n in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
275
272
class stripcallback(object):
    """Transaction postclose callback that strips the accumulated nodes."""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the conventional backup topic when none was given
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """Queue more nodes to be stripped when the transaction closes."""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # Only strip roots whose descendants are all queued as well.
        tostrip = safestriproots(self.ui, self.repo, self.nodelist)
        if not tostrip:
            return
        strip(self.ui, self.repo, tostrip, self.backup, self.topic)
293
290
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction in flight: strip right away.
        roots = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, roots, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', callback)
    # Last non-None topic wins for the shared callback.
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
316
313
def stripmanifest(repo, striprev, tr, files):
    """Strip revisions >= striprev from the manifest log(s)."""
    # Truncate the root manifest revlog, then any tree manifest revlogs.
    repo.manifestlog._revlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
318
def striptrees(repo, tr, striprev, files):
    """Strip revisions >= striprev from every per-directory manifest revlog.

    'files' is accepted for interface symmetry with stripmanifest() but is
    not consulted here.  No-op unless the repo uses tree manifests.
    """
    if 'treemanifest' in repo.requirements: # safe but unnecessary
                                            # otherwise
        for unencoded, encoded, size in repo.store.datafiles():
            if (unencoded.startswith('meta/') and
                unencoded.endswith('00manifest.i')):
                # path is 'meta/<dir>/00manifest.i'; extract <dir>.
                # (renamed from 'dir' to avoid shadowing the builtin)
                treedir = unencoded[5:-12]
                repo.manifestlog._revlog.dirlog(treedir).strip(striprev, tr)
325
327
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                # Only record store paths that actually exist on disk.
                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # Tree-manifest repos also track per-directory manifest revlogs.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # Persist the rebuilt cache within its own transaction.
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
397
399
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # Ancestors of the bookmark, minus anything also reachable from another
    # head or another bookmark, are safe to remove with the bookmark.
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
409
411
def deleteobsmarkers(obsstore, indices):
    """Remove the obsmarkers at the given indices and return the count removed.

    'indices' is a list of ints naming the positions of the markers to drop.

    The obsstore file is completely rewritten on every call: surviving
    markers are written to a temporary file which is atomically renamed over
    the obsstore on .close(), guaranteeing consistency.
    """
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = [m for pos, m in enumerate(obsstore._all) if pos not in indices]
    ndeleted = len(obsstore._all) - len(kept)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return ndeleted
General Comments 0
You need to be logged in to leave comments. Login now