##// END OF EJS Templates
strip: move attributes shortcut assigned earlier...
Boris Feld -
r41131:cfd95219 default
parent child Browse files
Show More
@@ -1,443 +1,443 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 stringutil,
32 stringutil,
33 )
33 )
34
34
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """create a bundle with the specified revisions as a backup

    The bundle covers the revset '%ln::%ln' spanned by 'bases' and 'heads'
    and is written under the repo's .hg/strip-backup/ directory. 'node' and
    'suffix' only feed into the backup file name; 'obsolescence' controls
    whether exclusive obsolescence markers are bundled as well.

    Returns the vfs-relative path of the bundle that was written.
    """

    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set('%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(''.join(allhashes)).digest()
    # <short-node>-<4-byte-hash>-<suffix>.hg, e.g. 1234abcd-deadbeef-backup.hg
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(totalhash[:4]), suffix)

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != '01':
        # modern repo: always a bundle2 container; compression is a
        # bundle2-level option rather than part of the bundle type
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    elif compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
70
70
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Returns a sorted list of every file path touched by any changeset at
    or above 'striprev'.
    """
    touched = set()
    for rev in pycompat.xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
79
79
80 def _collectrevlog(revlog, striprev):
80 def _collectrevlog(revlog, striprev):
81 _, brokenset = revlog.getstrippoint(striprev)
81 _, brokenset = revlog.getstrippoint(striprev)
82 return [revlog.linkrev(r) for r in brokenset]
82 return [revlog.linkrev(r) for r in brokenset]
83
83
def _collectmanifest(repo, striprev):
    # the root (flat) manifest storage is registered under the empty tree name
    rootstore = repo.manifestlog.getstorage(b'')
    return _collectrevlog(rootstore, striprev)
86
86
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
96
96
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the changesets in 'nodelist' (and their descendants) from repo

    A backup bundle is written under .hg/strip-backup/ when 'backup' is
    true; 'topic' becomes part of the backup file name.  Returns the
    vfs-relative path of that backup bundle, or None if no backup was made.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # a revision is stripped if any of its parents is stripped
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            # r supersedes its parents as a head of the saved set
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        newbmtarget = '.'

    # collect the bookmarks that point into the stripped set; they will be
    # moved onto 'newbmtarget' after the strip
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    with ui.uninterruptible():
        try:
            with repo.transaction("strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                offset = len(tr._entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                # truncate every file the strip group registered with the
                # transaction back to its pre-strip offset
                for i in pycompat.xrange(offset, len(tr._entries)):
                    file, troffset, ignore = tr._entries[i]
                    with repo.svfs(file, 'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                # drop the cached property so the obsstore is reloaded
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # re-apply the changesets we saved before truncating
                ui.note(_("adding branch\n"))
                f = vfs.open(tmpbundlefile, "rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                txnname = 'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=tmpbundleurl)
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            with repo.transaction('repair') as tr:
                # move bookmarks that pointed at stripped changesets
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                bm.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(_('error removing %s: %s\n') %
                                (undovfs.join(undofile),
                                 stringutil.forcebytestr(e)))

        except: # re-raises
            if backupfile:
                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                        % vfs.join(backupfile))
            if tmpbundlefile:
                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                        % vfs.join(tmpbundlefile))
                ui.warn(_("(fix the problem, then recover the changesets with "
                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
271
271
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    Any requested node whose descendants are not all covered by 'nodes'
    (i.e. stripping it would orphan other changesets) is excluded, with a
    warning listing the nodes that will not be stripped.
    """
    torev = repo.unfiltered().changelog.rev
    revs = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    # Note: internal-phase changesets are ignored when looking for orphans.
    revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    # only the roots need to be handed to strip(); it removes descendants
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
288
288
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes across a transaction and strips the safe subset of
    them (see safestriproots) once the transaction has closed.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was supplied
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue additional nodes to strip when the transaction closes"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        tostrip = safestriproots(self.ui, self.repo, self.nodelist)
        if not tostrip:
            return
        strip(self.ui, self.repo, tostrip, self.backup, self.topic)
306
306
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip right away
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        # first delayedstrip() in this transaction: register the callback
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        # "last non-None topic wins" (see docstring)
        callback.topic = topic
    callback.addnodes(nodelist)
329
329
def stripmanifest(repo, striprev, tr, files):
    # truncate the root (flat) manifest revlog, then any tree manifests
    rootstore = repo.manifestlog.getstorage(b'')
    rootstore.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
334
334
def striptrees(repo, tr, striprev, files):
    # safe but unnecessary on non-tree repositories
    if 'treemanifest' not in repo.requirements:
        return
    for unencoded, _encoded, _size in repo.store.datafiles():
        if not unencoded.startswith('meta/'):
            continue
        if not unencoded.endswith('00manifest.i'):
            continue
        # 'meta/<dir>00manifest.i' -> '<dir>' (keeps the trailing slash)
        treedir = unencoded[5:-12]
        repo.manifestlog.getstorage(treedir).strip(striprev, tr)
343
343
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        # walk every changeset and record the store paths of all filelogs
        # that actually exist for the files each changeset touches
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
            # also track per-directory tree manifest revlogs
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache through a transaction
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
415
415
def deleteobsmarkers(obsstore, indices):
    """Remove the markers at the given indices from ``obsstore``.

    'indices' is a list of ints which are the indices of the markers to be
    deleted.  The obsstore file is rewritten wholesale: the surviving
    markers are re-encoded into an atomictemp file which replaces
    'obsstore' on close, so readers never observe a partial write.

    Returns the number of markers removed, or None when 'indices' is empty
    (in which case the file is left untouched).
    """
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    dropped = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            dropped += 1
        else:
            kept.append(marker)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for encoded in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(encoded)
    newobsstorefile.close()
    return dropped
General Comments 0
You need to be logged in to leave comments. Login now