##// END OF EJS Templates
strip: don't allow empty changegroup in bundle1...
Martin von Zweigbergk -
r33307:665271d6 default
parent child Browse files
Show More
@@ -1,434 +1,434
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 discovery,
19 discovery,
20 error,
20 error,
21 exchange,
21 exchange,
22 obsolete,
22 obsolete,
23 obsutil,
23 obsutil,
24 util,
24 util,
25 )
25 )
26
26
def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """Write a backup bundle containing the specified revisions.

    Returns the bundle's path relative to repo.vfs.
    """
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Hash every node in the bundled set so the file name is unique.
    hexes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), digest[:8], suffix)

    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion == '01':
        # legacy changegroup version: must use a bundle1 container
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            comp = 'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
60
60
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    affected = set()
    # every revision at or above striprev may touch files we must truncate
    for rev in xrange(striprev, len(repo)):
        affected.update(repo[rev].files())
    return sorted(affected)
69
69
70 def _collectbrokencsets(repo, files, striprev):
70 def _collectbrokencsets(repo, files, striprev):
71 """return the changesets which will be broken by the truncation"""
71 """return the changesets which will be broken by the truncation"""
72 s = set()
72 s = set()
73 def collectone(revlog):
73 def collectone(revlog):
74 _, brokenset = revlog.getstrippoint(striprev)
74 _, brokenset = revlog.getstrippoint(striprev)
75 s.update([revlog.linkrev(r) for r in brokenset])
75 s.update([revlog.linkrev(r) for r in brokenset])
76
76
77 collectone(repo.manifestlog._revlog)
77 collectone(repo.manifestlog._revlog)
78 for fname in files:
78 for fname in files:
79 collectone(repo.file(fname))
79 collectone(repo.file(fname))
80
80
81 return s
81 return s
82
82
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip the given changesets and their descendants from the repo.

    ui       - ui object used for status/warning output
    nodelist - a node or list of nodes to strip
    backup   - whether to write a backup bundle of the stripped changesets
               (legacy string values 'none'/'strip' are treated as False)
    topic    - suffix used in the backup bundle's file name

    Returns the backup bundle path, or None when no backup was written.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument: older callers passed 'none'/'strip' instead of a boolean.
    if backup in ['none', 'strip']:
        backup = False

    # Operate on the unfiltered repo so hidden revisions are handled too.
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # everything at rev >= striprev will be truncated from the revlogs
    striprev = min(striplist)

    # filelogs touched at or above striprev, and the changesets that would
    # be left half-written by truncating those filelogs
    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # Obsolescence markers exclusive to the stripped changesets are deleted
    # alongside them, but only when the devel option is enabled.
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # bookmarks pointing into the stripped set will be moved to newbmtarget
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touch the markers for the
        # saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    mfst = repo.manifestlog._revlog

    try:
        with repo.transaction("strip") as tr:
            # remember how many truncation records the transaction already
            # holds, so only the entries added below get replayed
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            mfst.strip(striprev, tr)
            striptrees(repo, tr, striprev, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # apply the truncations recorded by the strip calls above
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            # drop the cached obsstore so it is reloaded from disk
            del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            # Re-apply the saved (non-stripped) revisions.  The bundle is
            # never empty here because tmpbundlefile is only created when
            # saveheads is non-empty.
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()

        with repo.transaction('repair') as tr:
            bm.recordchange(tr)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        # point the user at the backup/temporary bundles before re-raising
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
256
256
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = set(torev(n) for n in nodes)
    # Keep only revisions whose entire descendant set was requested:
    #   tostrip  = wanted - ancestors(orphaned)
    #   orphaned = descendants(roots(wanted)) - wanted
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        names = sorted(short(repo[r].node()) for r in skipped)
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % ', '.join(names))
    return [ctx.node() for ctx in repo.set('roots(%ld)', tostrip)]
272
272
class stripcallback(object):
    """Transaction postclose callback that strips the collected nodes.

    Nodes are accumulated through addnodes(); when the transaction closes,
    the safe roots of everything collected are stripped in one go.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        if topic:
            self.topic = topic
        else:
            self.topic = 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """Queue more nodes to be stripped at transaction close."""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        striproots = safestriproots(self.ui, self.repo, self.nodelist)
        if not striproots:
            return
        strip(self.ui, self.repo, striproots, self.backup, self.topic)
290
290
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside a transaction and skips irrelevant revs

    nodelist must explicitly contain all descendants; a warning is printed
    for any node that cannot safely be stripped.

    A backup is always written. The last non-None "topic" wins as the
    backup topic name; it defaults to "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip immediately
        return strip(ui, repo, safestriproots(ui, repo, nodelist), True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
313
313
def striptrees(repo, tr, striprev, files):
    """Strip per-directory manifest revlogs (treemanifest repos only)."""
    if 'treemanifest' not in repo.requirements:
        # flat-manifest repo: only 00manifest exists, already handled
        return
    for unencoded, encoded, size in repo.store.datafiles():
        if not unencoded.startswith('meta/'):
            continue
        if not unencoded.endswith('00manifest.i'):
            continue
        # 'meta/<dir>/00manifest.i' -> '<dir>/'
        dirname = unencoded[5:-12]
        repo.manifestlog._revlog.dirlog(dirname).strip(striprev, tr)
322
322
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # A membership test forces the fncache to be loaded from disk.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        total = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=total,
                        unit=_('changesets'))

            for f in repo[rev].files():
                # Each file only needs checking once; skipping duplicates
                # minimizes I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                for path in ('data/%s.i' % f, 'data/%s.d' % f):
                    if repo.store._exists(path):
                        newentries.add(path)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                for path in ('meta/%s/00manifest.i' % dir,
                             'meta/%s/00manifest.d' % dir):
                    if repo.store._exists(path):
                        newentries.add(path)

        stale = oldentries - newentries
        missing = newentries - oldentries
        for p in sorted(stale):
            ui.write(_('removing %s\n') % p)
        for p in sorted(missing):
            ui.write(_('adding %s\n') % p)

        if missing or stale:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (len(missing), len(stale)))
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
394
394
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # The bookmark's ancestry, minus anything reachable from other heads
    # or from other bookmarks.
    revset = ("ancestors(bookmark(%s)) - "
              "ancestors(head() and not bookmark(%s)) - "
              "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(revset, mark, mark, mark)
406
406
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Use a set so the per-marker membership test below is O(1) instead of
    # O(len(indices)) — the original list lookup made the scan quadratic.
    indices = set(indices)
    left = []
    n = 0
    for i, m in enumerate(obsstore._all):
        if i in indices:
            n += 1
            continue
        left.append(m)

    # atomictemp ensures the rename over the old obsstore only happens on a
    # clean close, so readers never see a partially written file.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now