##// END OF EJS Templates
repair: reliably obtain bytestr of node ids
Augie Fackler -
r34219:6193d810 default
parent child Browse files
Show More
@@ -1,439 +1,443 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import (
16 hex,
17 short,
18 )
16 from . import (
19 from . import (
17 bundle2,
20 bundle2,
18 changegroup,
21 changegroup,
19 discovery,
22 discovery,
20 error,
23 error,
21 exchange,
24 exchange,
22 obsolete,
25 obsolete,
23 obsutil,
26 obsutil,
24 util,
27 util,
25 )
28 )
26
29
def _bundle(repo, bases, heads, node, suffix, compress=True, obsolescence=True):
    """Write a backup bundle of the revisions between bases and heads.

    The bundle is stored under .hg/strip-backup/ and its name embeds the
    short hash of ``node`` plus a digest over every bundled node so that
    backups of different revision sets never collide.  Returns the
    vfs-relative path of the written bundle.
    """
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Mix a digest of all bundled nodes into the filename for uniqueness.
    hexhashes = sorted(ctx.hex() for ctx in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hexhashes)).digest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(digest[:4]), suffix)

    cgversion = changegroup.safeversion(repo)
    comp = None
    if cgversion != '01':
        # Modern changegroup: always a HG20 bundle, optionally bz2-compressed.
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    else:
        # Legacy changegroup: compression is encoded in the bundle type.
        bundletype = "HG10BZ" if compress else "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
60
64
def _collectfiles(repo, striprev):
    """Return the sorted list of files touched by any revision >= striprev.

    These are exactly the filelogs that the strip will affect.
    """
    touched = set()
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
69
73
70 def _collectrevlog(revlog, striprev):
74 def _collectrevlog(revlog, striprev):
71 _, brokenset = revlog.getstrippoint(striprev)
75 _, brokenset = revlog.getstrippoint(striprev)
72 return [revlog.linkrev(r) for r in brokenset]
76 return [revlog.linkrev(r) for r in brokenset]
73
77
def _collectmanifest(repo, striprev):
    """Return linkrevs of manifest entries broken by stripping striprev."""
    return _collectrevlog(repo.manifestlog._revlog, striprev)
76
80
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation

    Collects the linkrevs of every manifest and filelog entry (for the
    given files) that stripping at ``striprev`` would orphan.
    """
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken |= set(_collectrevlog(repo.file(fname), striprev))
    return broken
86
90
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """Strip changesets (and their descendants) from the repository.

    'nodelist' is a single node or a list of nodes; any revision with a
    stripped parent is stripped too.  Revisions above the strip point that
    are *not* descendants are saved to a temporary bundle and re-applied
    after the revlogs are truncated.

    'backup' controls whether a backup bundle is written to
    .hg/strip-backup/ (the legacy string values 'none' and 'strip' are
    treated as False).  'topic' is the backup bundle filename suffix.

    Returns the backup file path (or None if 'backup' was False).  Raises
    error.ProgrammingError if a transaction is already active.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # Any revision with a stripped parent must be stripped as well.
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            # r supersedes its parents as a head of the saved set.
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # Indices of obsmarkers to delete: those exclusive to the stripped
    # changesets (per obsutil.exclusivemarkers).
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # Bookmarks pointing into the stripped set get moved to newbmtarget.
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
                                compress=False, obsolescence=False)

    try:
        with repo.transaction("strip") as tr:
            offset = len(tr.entries)

            tr.startgroup()
            cl.strip(striprev, tr)
            stripmanifest(repo, striprev, tr, files)

            for fn in files:
                repo.file(fn).strip(striprev, tr)
            tr.endgroup()

            # Truncate the revlog files at the recorded offsets; a file
            # truncated to 0 no longer exists as far as the store knows.
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                with repo.svfs(file, 'a', checkambig=True) as fp:
                    fp.truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            # Drop the cached obsstore so it is reloaded from disk.
            del repo.obsstore

        repo._phasecache.filterunknown(repo)
        if tmpbundlefile:
            # Re-apply the saved (non-descendant) revisions.
            ui.note(_("adding branch\n"))
            f = vfs.open(tmpbundlefile, "rb")
            gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
            txnname = 'strip'
            if not isinstance(gen, bundle2.unbundle20):
                txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
            with repo.transaction(txnname) as tr:
                bundle2.applybundle(repo, gen, tr, source='strip',
                                    url=tmpbundleurl)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
        repo._phasecache.invalidate()

        with repo.transaction('repair') as tr:
            bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
            bm.applychanges(repo, tr, bmchanges)

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        if tmpbundlefile:
            ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile))
            ui.warn(_("(fix the problem, then recover the changesets with "
                      "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
        raise
    else:
        if tmpbundlefile:
            # Remove temporary bundle only if there were no exceptions
            vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
256
260
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    Nodes whose descendants are not fully contained in ``nodes`` would leave
    orphans behind; they are excluded and reported with a warning.
    """
    torev = repo.unfiltered().changelog.rev
    wanted = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))',
                            wanted, wanted, wanted))
    skipped = wanted - tostrip
    if skipped:
        nodestr = ', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
272
276
class stripcallback(object):
    """Transaction postclose callback that strips accumulated nodes.

    Nodes are collected via addnodes() while the transaction is open; when
    the transaction closes, the callback strips the safe roots of the
    accumulated set (always with a backup).
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # Fall back to the default topic when none was supplied.
        self.topic = topic if topic else 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """Queue more nodes to be stripped when the transaction closes."""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        # Only strip nodes whose descendants are fully covered; skip
        # entirely when nothing can be stripped safely.
        striproots = safestriproots(self.ui, self.repo, self.nodelist)
        if not striproots:
            return
        strip(self.ui, self.repo, striproots, self.backup, self.topic)
290
294
def delayedstrip(ui, repo, nodelist, topic=None):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Always do a backup. The last non-None "topic" will be used as the backup
    topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction active: strip immediately.
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, True, topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, True, topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
313
317
def stripmanifest(repo, striprev, tr, files):
    """Strip the root manifest revlog at striprev, then any tree manifests."""
    revlog = repo.manifestlog._revlog
    revlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
318
322
def striptrees(repo, tr, striprev, files):
    """Strip revisions >= striprev from every per-directory manifest revlog."""
    # Scanning the store is safe but unnecessary when the repo does not
    # use tree manifests.
    if 'treemanifest' in repo.requirements:
        for unencoded, _encoded, _size in repo.store.datafiles():
            if not unencoded.startswith('meta/'):
                continue
            if not unencoded.endswith('00manifest.i'):
                continue
            # 'meta/<dir>00manifest.i' -> '<dir>'
            subdir = unencoded[5:-12]
            repo.manifestlog._revlog.dirlog(subdir).strip(striprev, tr)
327
331
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    Walks every changeset, checks which filelog (and, for treemanifest
    repos, per-directory manifest) files actually exist in the store, and
    rewrites the fncache inside a transaction when it differs.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        # (The membership test forces the lazy fncache to load; the result
        # itself is deliberately unused.)
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('rebuilding'), rev, total=repolen,
                        unit=_('changesets'))

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('rebuilding'), None)

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            # Mark dirty so write() persists the rebuilt entry set.
            fnc._dirty = True

            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
399
403
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # Ancestors of the bookmark, minus anything also reachable from another
    # head or another bookmark: only revisions exclusive to 'mark' remain.
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
411
415
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    The obsstore file is rewritten in full, skipping the doomed markers:
    surviving markers go to an atomictemp file that replaces the obsstore
    on close, so the store is always consistent on disk."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    ndeleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            ndeleted += 1
        else:
            kept.append(marker)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return ndeleted
General Comments 0
You need to be logged in to leave comments. Login now