##// END OF EJS Templates
strip: access bookmark before getting a reference to changelog...
marmoute -
r42902:a1f10edc stable
parent child Browse files
Show More
@@ -1,479 +1,482 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 stringutil,
32 stringutil,
33 )
33 )
34
34
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """Create a backup bundle of the revisions between bases and heads.

    The bundle is written under .hg/strip-backup/ and its path is
    returned.  ``node`` and ``suffix`` only influence the file name;
    ``compress`` and ``obsolescence`` control the bundle contents.
    """
    vfs = repo.vfs
    bakdir = "strip-backup"
    if not vfs.isdir(bakdir):
        vfs.mkdir(bakdir)

    # Include a hash of all the nodes in the filename for uniqueness
    hashes = sorted(c.hex() for c in repo.set('%ln::%ln', bases, heads))
    digest = hashlib.sha1(''.join(hashes)).digest()
    name = "%s/%s-%s-%s.hg" % (bakdir, short(node), hex(digest[:4]), suffix)

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != '01':
        # modern repositories always get a bundle2 container
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    else:
        bundletype = "HG10BZ" if compress else "HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases,
                                  missingheads=heads)
    contentopts = {
        'cg.version': cgversion,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs,
                                  compression=comp)
70
70
def _collectfiles(repo, striprev):
    """Return the sorted list of files touched by revisions >= striprev."""
    touched = set()
    for rev in pycompat.xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
79
79
80 def _collectrevlog(revlog, striprev):
80 def _collectrevlog(revlog, striprev):
81 _, brokenset = revlog.getstrippoint(striprev)
81 _, brokenset = revlog.getstrippoint(striprev)
82 return [revlog.linkrev(r) for r in brokenset]
82 return [revlog.linkrev(r) for r in brokenset]
83
83
def _collectmanifest(repo, striprev):
    """Return linkrevs of root-manifest entries broken by the strip."""
    rootstore = repo.manifestlog.getstorage(b'')
    return _collectrevlog(rootstore, striprev)
86
86
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
96
96
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the revisions in ``nodelist`` (and their descendants) from
    the repository

    Returns the backup bundle path (or None if ``backup`` was False) so
    extensions can use it.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # obsmarker indices to remove alongside the stripped changesets
    # (only when the devel option is set)
    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    with ui.uninterruptible():
        try:
            with repo.transaction("strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                offset = len(tr._entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                # truncate every file the transaction touched after
                # ``offset`` back to its recorded pre-strip size
                for i in pycompat.xrange(offset, len(tr._entries)):
                    file, troffset, ignore = tr._entries[i]
                    with repo.svfs(file, 'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # re-apply the changesets we saved above
                ui.note(_("adding branch\n"))
                f = vfs.open(tmpbundlefile, "rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                txnname = 'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=tmpbundleurl)
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            # move bookmarks that pointed at stripped changesets
            with repo.transaction('repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(_('error removing %s: %s\n') %
                                (undovfs.join(undofile),
                                 stringutil.forcebytestr(e)))

        except: # re-raises
            if backupfile:
                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                        % vfs.join(backupfile))
            if tmpbundlefile:
                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                        % vfs.join(tmpbundlefile))
                ui.warn(_("(fix the problem, then recover the changesets with "
                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
254
257
def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
    """perform a "soft" strip using the archived phase

    Instead of removing the revisions, move them (and their descendants)
    to the ``archived`` phase and relocate any bookmarks pointing at
    them.  Returns the backup bundle path, or None when nothing was
    stripped or ``backup`` was False.
    """
    tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    # Initialize up front: without this, `backup=False` made the final
    # `return backupfile` raise UnboundLocalError.
    backupfile = None
    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    with repo.transaction('strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
271
274
272
275
def _bookmarkmovements(repo, tostrip):
    """Compute the bookmark movements required by stripping ``tostrip``.

    Returns ``(newbmtarget, updatebm)`` where ``updatebm`` is the list of
    bookmark names pointing into the stripped set and ``newbmtarget`` is
    the node they should be moved to (None when no bookmark moves).
    """
    marks = repo._bookmarks
    moved = [name for name in marks
             if repo[marks[name]].rev() in tostrip]
    target = None
    # If we need to move bookmarks, compute bookmark
    # targets. Otherwise we can skip doing this logic.
    if moved:
        # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
        # but is much faster
        candidates = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
        if candidates:
            target = repo[candidates.first()].node()
        else:
            target = '.'
    return target, moved
293
296
def _createstripbackup(repo, stripbases, node, topic):
    """Write a backup bundle of the changesets about to be stripped.

    The bundle covers ``stripbases`` up to the current changelog heads;
    the path of the backup file is announced to the user, logged, and
    returned.
    """
    vfs = repo.vfs
    heads = repo.changelog.heads()
    path = backupbundle(repo, stripbases, heads, node, topic)
    repo.ui.status(_("saved backup bundle to %s\n") % vfs.join(path))
    repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                vfs.join(path))
    return path
304
307
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    strippable = set(repo.revs(revset, wanted, wanted,
                               phases.internal, wanted))
    skipped = wanted - strippable
    if skipped:
        nodestr = ', '.join(sorted(short(repo[r].node()) for r in skipped))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', strippable)]
321
324
class stripcallback(object):
    """Transaction postclose callback that accumulates nodes to strip.

    Nodes are collected via ``addnodes`` while the transaction is open;
    when the transaction closes, the safe roots of the collected nodes
    are stripped in one pass.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was given
        self.topic = topic if topic else 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """Queue additional nodes for stripping."""
        self.nodelist += list(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if not roots:
            return
        strip(self.ui, self.repo, roots, self.backup, self.topic)
339
342
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip immediately
        return strip(ui, repo, safestriproots(ui, repo, nodelist),
                     backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    cb = tr.getpostclose('\xffstrip')
    if cb is None:
        cb = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose('\xffstrip', cb)
    if topic:
        cb.topic = topic
    cb.addnodes(nodelist)
362
365
def stripmanifest(repo, striprev, tr, files):
    """Strip the root manifest at ``striprev``, then any tree manifests."""
    rootstore = repo.manifestlog.getstorage(b'')
    rootstore.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
367
370
def striptrees(repo, tr, striprev, files):
    """Strip every per-directory (tree) manifest revlog at ``striprev``."""
    # This logic is safe if treemanifest isn't enabled, but also
    # pointless, so we skip it if treemanifest isn't enabled.
    if 'treemanifest' not in repo.requirements:
        return
    for unencoded, encoded, size in repo.store.datafiles():
        if not unencoded.startswith('meta/'):
            continue
        if not unencoded.endswith('00manifest.i'):
            continue
        # 'meta/<dir>/00manifest.i' -> '<dir>/'
        dir = unencoded[5:-12]
        repo.manifestlog.getstorage(dir).strip(striprev, tr)
377
380
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                # a filelog has an index ('.i') and possibly a data
                # file ('.d'); record whichever exists in the store
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if 'treemanifest' in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        # report the delta between the on-disk fncache and the rebuilt one
        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache atomically via a transaction
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
451
454
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # use a set for O(1) membership tests; 'indices' is a list, and the
    # original per-marker list scan made this loop O(markers * indices)
    todelete = set(indices)
    left = []
    n = 0
    for i, m in enumerate(obsstore._all):
        if i in todelete:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    # 'data' instead of shadowing the 'bytes' builtin
    for data in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now