##// END OF EJS Templates
repair: reword comments that I noticed while working on source formatting...
Augie Fackler -
r42439:d811f170 default
parent child Browse files
Show More
@@ -1,476 +1,479
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 stringutil,
32 stringutil,
33 )
33 )
34
34
def backupbundle(repo, bases, heads, node, suffix, compress=True,
                 obsolescence=True):
    """create a bundle with the specified revisions as a backup"""

    # Backup bundles live under .hg/strip-backup; create it on first use.
    backupdir = "strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    commits = repo.set('%ln::%ln', bases, heads)
    hashes = sorted(c.hex() for c in commits)
    digest = hashlib.sha1(''.join(hashes)).digest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
                               hex(digest[:4]), suffix)

    # Pick the bundle container and compression matching the repo's
    # changegroup version.
    version = changegroup.localversion(repo)
    comp = None
    if version == '01':
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            comp = 'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        'cg.version': version,
        'obsolescence': obsolescence,
        'phases': True,
    }
    return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
                                  outgoing, contentopts, vfs, compression=comp)
70
70
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union the file lists of every changeset at or above the strip point.
    touched = set()
    for rev in pycompat.xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
79
79
80 def _collectrevlog(revlog, striprev):
80 def _collectrevlog(revlog, striprev):
81 _, brokenset = revlog.getstrippoint(striprev)
81 _, brokenset = revlog.getstrippoint(striprev)
82 return [revlog.linkrev(r) for r in brokenset]
82 return [revlog.linkrev(r) for r in brokenset]
83
83
def _collectmanifest(repo, striprev):
    # Changelog revisions whose root-manifest entries would be broken by
    # truncating the manifest revlog at 'striprev'.
    return _collectrevlog(repo.manifestlog.getstorage(b''), striprev)
86
86
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    # Start from the manifest's broken linkrevs, then fold in every
    # filelog touched by the strip.
    broken = set(_collectmanifest(repo, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
96
96
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the changesets in 'nodelist' (and their descendants) from the
    repository, returning the backup bundle path (or None if no backup was
    written)"""
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError('cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    # everything from the lowest targeted revision onward is affected
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            # revision survives the strip: it must go into the temporary
            # bundle and may become a new head of the saved set
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool('devel', 'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [i for i, m in enumerate(repo.obsstore)
                       if m in obsmarkers]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
                                     compress=False, obsolescence=False)

    with ui.uninterruptible():
        try:
            with repo.transaction("strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                offset = len(tr._entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                # truncate every file the transaction touched back to the
                # offset it recorded, and drop files truncated to nothing
                for i in pycompat.xrange(offset, len(tr._entries)):
                    file, troffset, ignore = tr._entries[i]
                    with repo.svfs(file, 'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # re-apply the changesets we saved before truncating
                ui.note(_("adding branch\n"))
                f = vfs.open(tmpbundlefile, "rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
                txnname = 'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(repo, gen, tr, source='strip',
                                        url=tmpbundleurl)
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            with repo.transaction('repair') as tr:
                # move bookmarks that pointed into the stripped set
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(_('error removing %s: %s\n') %
                                (undovfs.join(undofile),
                                 stringutil.forcebytestr(e)))

        except: # re-raises
            if backupfile:
                ui.warn(_("strip failed, backup bundle stored in '%s'\n")
                        % vfs.join(backupfile))
            if tmpbundlefile:
                ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
                        % vfs.join(tmpbundlefile))
                ui.warn(_("(fix the problem, then recover the changesets with "
                          "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
254
254
def softstrip(ui, repo, nodelist, backup=True, topic='backup'):
    """perform a "soft" strip using the archived phase

    Instead of truncating revlogs, the targeted changesets (and their
    descendants) are moved to the archived phase and any bookmarks
    pointing at them are relocated.

    Returns the backup bundle path, or None when nothing was stripped or
    no backup was requested.
    """
    tostrip = [c.node() for c in repo.set('sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    # BUG FIX: 'backupfile' must be initialized unconditionally; the
    # original only bound it inside the 'if backup:' branch, so calling
    # softstrip(..., backup=False) raised NameError at the final return.
    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    with repo.transaction('strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
271
271
272
272
273 def _bookmarkmovements(repo, tostrip):
273 def _bookmarkmovements(repo, tostrip):
274 # compute necessary bookmark movement
274 # compute necessary bookmark movement
275 bm = repo._bookmarks
275 bm = repo._bookmarks
276 updatebm = []
276 updatebm = []
277 for m in bm:
277 for m in bm:
278 rev = repo[bm[m]].rev()
278 rev = repo[bm[m]].rev()
279 if rev in tostrip:
279 if rev in tostrip:
280 updatebm.append(m)
280 updatebm.append(m)
281 newbmtarget = None
281 newbmtarget = None
282 # If we need to move bookmarks, compute bookmark
282 # If we need to move bookmarks, compute bookmark
283 # targets. Otherwise we can skip doing this logic.
283 # targets. Otherwise we can skip doing this logic.
284 if updatebm:
284 if updatebm:
285 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
285 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
286 # but is much faster
286 # but is much faster
287 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
287 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
288 if newbmtarget:
288 if newbmtarget:
289 newbmtarget = repo[newbmtarget.first()].node()
289 newbmtarget = repo[newbmtarget.first()].node()
290 else:
290 else:
291 newbmtarget = '.'
291 newbmtarget = '.'
292 return newbmtarget, updatebm
292 return newbmtarget, updatebm
293
293
def _createstripbackup(repo, stripbases, node, topic):
    # backup the changeset we are about to strip
    vfs = repo.vfs
    cl = repo.changelog
    backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
    repo.ui.status(_("saved backup bundle to %s\n") %
                   vfs.join(backupfile))
    # also record the backup location via ui.log so configured loggers
    # capture it
    repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                vfs.join(backupfile))
    return backupfile
304
304
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = set(torev(n) for n in nodes)
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    # Note the '_phase(%s)' clause: internal-phase changesets are excluded
    # when computing the affected set.
    revset = '%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        # some requested nodes would leave orphaned descendants behind;
        # warn and leave them alone
        nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(_('warning: orphaned descendants detected, '
                  'not stripping %s\n') % nodestr)
    return [c.node() for c in repo.set('roots(%ld)', tostrip)]
321
321
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes across an in-flight transaction and performs one
    combined strip when the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        self.topic = topic or 'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue additional nodes to strip at transaction close"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        striproots = safestriproots(self.ui, self.repo, self.nodelist)
        if not striproots:
            return
        strip(self.ui, self.repo, striproots, self.backup, self.topic)
339
339
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip immediately
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose('\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose('\xffstrip', callback)
    if topic:
        callback.topic = topic
    # accumulate nodes on the shared callback; the actual strip runs once,
    # after the transaction closes
    callback.addnodes(nodelist)
362
362
def stripmanifest(repo, striprev, tr, files):
    # Strip the root manifest revlog first, then any per-directory tree
    # manifest revlogs.
    revlog = repo.manifestlog.getstorage(b'')
    revlog.strip(striprev, tr)
    striptrees(repo, tr, striprev, files)
367
367
def striptrees(repo, tr, striprev, files):
    """strip every tree-manifest revlog back to 'striprev'"""
    # Running this on a flat-manifest repo would be safe but pointless,
    # so bail out early when treemanifest is not enabled.
    if 'treemanifest' not in repo.requirements:
        return
    for unencoded, encoded, size in repo.store.datafiles():
        if (unencoded.startswith('meta/') and
            unencoded.endswith('00manifest.i')):
            dir = unencoded[5:-12]
            repo.manifestlog.getstorage(dir).strip(striprev, tr)
376
377
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Trigger load of fncache. The membership test is discarded; it
        # exists only for its side effect of forcing the lazy load.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # walk every changeset and record the .i/.d store paths of every
        # file it touches
        progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                                   total=len(repo))
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if 'treemanifest' in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in util.dirs(seenfiles):
                i = 'meta/%s/00manifest.i' % dir
                d = 'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache inside a transaction
            with repo.transaction('fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_('fncache already up to date\n'))
448
451
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # Partition the existing markers: count the ones being dropped and
    # keep everything else in order.
    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # Write the surviving markers to a temp file that atomically replaces
    # the obsstore on close.
    newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now