repair: reword comment about bookmarks logic...
Augie Fackler
r42432:e10b8058 default
@@ -1,474 +1,476 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib

 from .i18n import _
 from .node import (
     hex,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     discovery,
     error,
     exchange,
     obsolete,
     obsutil,
     phases,
     pycompat,
     util,
 )
 from .utils import (
     stringutil,
 )

 def backupbundle(repo, bases, heads, node, suffix, compress=True,
                  obsolescence=True):
     """create a bundle with the specified revisions as a backup"""

     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set('%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashlib.sha1(''.join(allhashes)).digest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                                hex(totalhash[:4]), suffix)

     cgversion = changegroup.localversion(repo)
     comp = None
     if cgversion != '01':
         bundletype = "HG20"
         if compress:
             comp = 'BZ'
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"

     outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
     contentopts = {
         'cg.version': cgversion,
         'obsolescence': obsolescence,
         'phases': True,
     }
     return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                   outgoing, contentopts, vfs, compression=comp)

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectrevlog(revlog, striprev):
     _, brokenset = revlog.getstrippoint(striprev)
     return [revlog.linkrev(r) for r in brokenset]

 def _collectmanifest(repo, striprev):
     return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()

     s.update(_collectmanifest(repo, striprev))
     for fname in files:
         s.update(_collectrevlog(repo.file(fname), striprev))

     return s

 def strip(ui, repo, nodelist, backup=True, topic='backup'):
     # This function requires the caller to lock the repo, but it operates
     # within a transaction of its own, and thus requires there to be no current
     # transaction when it is called.
     if repo.currenttransaction() is not None:
         raise error.ProgrammingError('cannot strip from inside a transaction')

     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()
     vfs = repo.vfs
     cl = repo.changelog

     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     saveheads = set(saverevs)
     for r in cl.revs(start=striprev + 1):
         if any(p in tostrip for p in cl.parentrevs(r)):
             tostrip.add(r)

         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     stripobsidx = obsmarkers = ()
     if repo.ui.configbool('devel', 'strip-obsmarkers'):
         obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
     if obsmarkers:
         stripobsidx = [i for i, m in enumerate(repo.obsstore)
                        if m in obsmarkers]

     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

     backupfile = None
     node = nodelist[-1]
     if backup:
         backupfile = _createstripbackup(repo, stripbases, node, topic)
     # create a changegroup for all the branches we need to keep
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         #
         # We do not include obsolescence, it might re-introduce prune markers
         # we are trying to strip. This is harmless since the stripped markers
         # are already backed up and we did not touched the markers for the
         # saved changesets.
         tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                      compress=False, obsolescence=False)

     with ui.uninterruptible():
         try:
             with repo.transaction("strip") as tr:
                 # TODO this code violates the interface abstraction of the
                 # transaction and makes assumptions that file storage is
                 # using append-only files. We'll need some kind of storage
                 # API to handle stripping for us.
                 offset = len(tr._entries)

                 tr.startgroup()
                 cl.strip(striprev, tr)
                 stripmanifest(repo, striprev, tr, files)

                 for fn in files:
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()

                 for i in pycompat.xrange(offset, len(tr._entries)):
                     file, troffset, ignore = tr._entries[i]
                     with repo.svfs(file, 'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
                         repo.store.markremoved(file)

                 deleteobsmarkers(repo.obsstore, stripobsidx)
                 del repo.obsstore
                 repo.invalidatevolatilesets()
                 repo._phasecache.filterunknown(repo)

             if tmpbundlefile:
                 ui.note(_("adding branch\n"))
                 f = vfs.open(tmpbundlefile, "rb")
                 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                 if not repo.ui.verbose:
                     # silence internal shuffling chatter
                     repo.ui.pushbuffer()
                 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                 txnname = 'strip'
                 if not isinstance(gen, bundle2.unbundle20):
                     txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                 with repo.transaction(txnname) as tr:
                     bundle2.applybundle(repo, gen, tr, source='strip',
                                         url=tmpbundleurl)
                 if not repo.ui.verbose:
                     repo.ui.popbuffer()
                 f.close()

             with repo.transaction('repair') as tr:
                 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                 repo._bookmarks.applychanges(repo, tr, bmchanges)

             # remove undo files
             for undovfs, undofile in repo.undofiles():
                 try:
                     undovfs.unlink(undofile)
                 except OSError as e:
                     if e.errno != errno.ENOENT:
                         ui.warn(_('error removing %s: %s\n') %
                                 (undovfs.join(undofile),
                                  stringutil.forcebytestr(e)))

         except: # re-raises
             if backupfile:
                 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                         % vfs.join(backupfile))
             if tmpbundlefile:
                 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                         % vfs.join(tmpbundlefile))
                 ui.warn(_("(fix the problem, then recover the changesets with "
                           "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
             raise
         else:
             if tmpbundlefile:
                 # Remove temporary bundle only if there were no exceptions
                 vfs.unlink(tmpbundlefile)

     repo.destroyed()
     # return the backup file path (or None if 'backup' was False) so
     # extensions can use it
     return backupfile

 def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
     """perform a "soft" strip using the archived phase"""
     tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
     if not tostrip:
         return None

     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
     if backup:
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)

     with repo.transaction('strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
         repo._bookmarks.applychanges(repo, tr, bmchanges)
     return backupfile


 def _bookmarkmovements(repo, tostrip):
     # compute necessary bookmark movement
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
     newbmtarget = None
-    if updatebm: # don't compute anything is there is no bookmark to move anyway
+    # If we need to move bookmarks, compute bookmark
+    # targets. Otherwise we can skip doing this logic.
+    if updatebm:
         # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
         # but is much faster
         newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
         if newbmtarget:
             newbmtarget = repo[newbmtarget.first()].node()
         else:
             newbmtarget = '.'
     return newbmtarget, updatebm

 def _createstripbackup(repo, stripbases, node, topic):
     # backup the changeset we are about to strip
     vfs = repo.vfs
     cl = repo.changelog
     backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
     repo.ui.status(_("saved backup bundle to %s\n") %
                    vfs.join(backupfile))
     repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                 vfs.join(backupfile))
     return backupfile

 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
     revs = set(torev(n) for n in nodes)
     # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
     revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
     tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
         nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
         ui.warn(_('warning: orphaned descendants detected, '
                   'not stripping %s\n') % nodestr)
     return [c.node() for c in repo.set('roots(%ld)', tostrip)]

 class stripcallback(object):
     """used as a transaction postclose callback"""

     def __init__(self, ui, repo, backup, topic):
         self.ui = ui
         self.repo = repo
         self.backup = backup
         self.topic = topic or 'backup'
         self.nodelist = []

     def addnodes(self, nodes):
         self.nodelist.extend(nodes)

     def __call__(self, tr):
         roots = safestriproots(self.ui, self.repo, self.nodelist)
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)

 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs

     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.

     Will do a backup if `backup` is True. The last non-None "topic" will be
     used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
         return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose('\xffstrip')
     if callback is None:
         callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose('\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)

 def stripmanifest(repo, striprev, tr, files):
     revlog = repo.manifestlog.getstorage(b'')
     revlog.strip(striprev, tr)
     striptrees(repo, tr, striprev, files)

 def striptrees(repo, tr, striprev, files):
     if 'treemanifest' in repo.requirements: # safe but unnecessary
                                             # otherwise
         for unencoded, encoded, size in repo.store.datafiles():
             if (unencoded.startswith('meta/') and
                 unencoded.endswith('00manifest.i')):
                 dir = unencoded[5:-12]
                 repo.manifestlog.getstorage(dir).strip(striprev, tr)

 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.

     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()

     if 'fncache' not in repo.requirements:
         ui.warn(_('(not rebuilding fncache because repository does not '
                   'support fncache)\n'))
         return

     with repo.lock():
         fnc = repo.store.fncache
         # Trigger load of fncache.
         if 'irrelevant' in fnc:
             pass

         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()

         progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                    total=len(repo))
         for rev in repo:
             progress.update(rev)

             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)

                 i = 'data/%s.i' % f
                 d = 'data/%s.d' % f

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         progress.complete()

         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
             for dir in util.dirs(seenfiles):
                 i = 'meta/%s/00manifest.i' % dir
                 d = 'meta/%s/00manifest.d' % dir

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_('removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_('adding %s\n') % p)

         if addcount or removecount:
             ui.write(_('%d items added, %d removed from fncache\n') %
                      (addcount, removecount))
             fnc.entries = newentries
             fnc._dirty = True

             with repo.transaction('fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_('fncache already up to date\n'))

 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted

     'indices' is a list of ints which are the indices
     of the markers to be deleted.

     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return

     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)

     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
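The reworded comment documents the bookmark-retargeting step: when bookmarks point into the set being stripped, they are moved to `max(parents(tostrip) - tostrip)`, the highest surviving parent of that set. Below is a minimal sketch of evaluating that same revset against a local clone; it is not part of the changeset, and the repository path and the `revs_to_strip` list are hypothetical.

```python
# Sketch only: evaluate the bookmark-target revset the way _bookmarkmovements does.
# Assumes it is run from the root of a local Mercurial clone; revs_to_strip is made up.
from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui.load(), b'.')
revs_to_strip = [5, 6, 7]  # hypothetical revision numbers about to be stripped

# max(parents(s) - s): the highest parent of the set that is not itself stripped.
target = repo.revs(b'max(parents(%ld) - (%ld))', revs_to_strip, revs_to_strip)
if target:
    print(hex(repo[target.first()].node()))
else:
    # mirrors the '.' fallback in _bookmarkmovements
    print('no parent outside the stripped set; bookmarks would move to "."')
```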
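For context on the surrounding file: `delayedstrip` exists so callers that already hold an open transaction can still request a strip; the work is deferred to a transaction postclose callback registered under the key `'\xffstrip'` so it tends to sort last. A hedged sketch of how an extension might rely on that, where the function name `removerewritten` and the `nodes` argument are illustrative only:

```python
# Illustrative only: schedule a strip from inside an open transaction.
# 'nodes' would be binary changeset IDs collected by the caller; calling
# repair.strip() directly here would raise ProgrammingError because a
# transaction is already open.
from mercurial import repair

def removerewritten(ui, repo, nodes):
    with repo.lock(), repo.transaction(b'prune') as tr:
        # ... history-rewriting work that makes 'nodes' obsolete goes here ...
        repair.delayedstrip(ui, repo, nodes, topic=b'prune')
        # the actual strip runs when the transaction's postclose callbacks fire
```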