##// END OF EJS Templates
repair: fix an `isinstance(nodelist, str)` check for Python 3 (node identifiers are `bytes`, so the check must be `isinstance(nodelist, bytes)`)
Matt Harbison -
r44179:8accf5fa stable
parent child Browse files
Show More
@@ -1,536 +1,536 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 exchange,
24 exchange,
25 obsolete,
25 obsolete,
26 obsutil,
26 obsutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import stringutil
31 from .utils import stringutil
32
32
33
33
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """Create a backup bundle of the revisions between ``bases`` and ``heads``.

    The bundle is written into the repository's ``strip-backup`` directory
    (created on demand).  The filename embeds the short hash of ``node`` and
    a 4-byte digest over every commit in the set, so concurrent backups of
    different revision sets never collide.

    ``compress`` selects BZ compression; ``obsolescence`` controls whether
    obsolescence markers are included (HG20 bundles only).  Returns the
    vfs-relative path of the written bundle.
    """

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashlib.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        # Modern repos: always a bundle2 container; compression is a
        # bundle2-level parameter rather than part of the bundle type.
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
83
83
84
84
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Returns the sorted list of every file touched by any changeset with
    revision number >= ``striprev``.
    """
    files = set()

    for x in pycompat.xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
93
93
94
94
95 def _collectrevlog(revlog, striprev):
95 def _collectrevlog(revlog, striprev):
96 _, brokenset = revlog.getstrippoint(striprev)
96 _, brokenset = revlog.getstrippoint(striprev)
97 return [revlog.linkrev(r) for r in brokenset]
97 return [revlog.linkrev(r) for r in brokenset]
98
98
99
99
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation

    Scans every manifest revlog and the filelog of each name in ``files``,
    collecting the linkrevs of entries that would be orphaned by stripping
    at ``striprev``.
    """
    s = set()

    for revlog in manifestrevlogs(repo):
        s.update(_collectrevlog(revlog, striprev))
    for fname in files:
        s.update(_collectrevlog(repo.file(fname), striprev))

    return s
110
110
111
111
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """Strip ``nodelist`` (and all descendants) from the repository.

    Revisions above the strip point that are *not* descendants of the
    stripped set are saved into a temporary bundle and re-applied after
    truncation.  If ``backup`` is true, the stripped changesets are first
    written to a backup bundle whose path is returned (None otherwise).
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    # py3: nodes are bytes, so a lone node must be tested with `bytes`,
    # not `str` (the old check silently never matched on Python 3).
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                offset = len(tr._entries)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                # Truncate the revlog files that the transaction appended
                # to, relying on the append-only property noted above.
                for i in pycompat.xrange(offset, len(tr._entries)):
                    file, troffset, ignore = tr._entries[i]
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                txnname = b'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(
                        repo, gen, tr, source=b'strip', url=tmpbundleurl
                    )
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(
                            _(b'error removing %s: %s\n')
                            % (
                                undovfs.join(undofile),
                                stringutil.forcebytestr(e),
                            )
                        )

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
294
294
295
295
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    Instead of physically removing the revisions, they (and their
    descendants) are moved to the ``archived`` phase.  Returns the backup
    bundle path, or None when ``backup`` is false or nothing was stripped.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    # Initialize before the conditional: otherwise the final return would
    # raise UnboundLocalError whenever backup=False.
    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
312
312
313
313
def _bookmarkmovements(repo, tostrip):
    """Compute the bookmark movements required by stripping ``tostrip``.

    Returns ``(newbmtarget, updatebm)`` where ``updatebm`` is the list of
    bookmark names currently pointing into ``tostrip`` and ``newbmtarget``
    is the node they should be moved to (None when no move is needed).
    """
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)
    newbmtarget = None
    # If we need to move bookmarks, compute bookmark
    # targets. Otherwise we can skip doing this logic.
    if updatebm:
        # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
        # but is much faster
        newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
        if newbmtarget:
            newbmtarget = repo[newbmtarget.first()].node()
        else:
            newbmtarget = b'.'
    return newbmtarget, updatebm
334
334
335
335
def _createstripbackup(repo, stripbases, node, topic):
    """Back up the changesets about to be stripped into a bundle.

    Writes a bundle covering ``stripbases`` up to the current changelog
    heads, reports it on the ui, and returns its path.
    """
    vfs = repo.vfs
    cl = repo.changelog
    backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
    repo.ui.log(
        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
    )
    return backupfile
346
346
347
347
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    A node is only safe to strip if all of its non-internal descendants are
    also requested; otherwise it is excluded and a warning is printed for
    it ("orphaned descendants detected").
    """
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
366
366
367
367
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes via ``addnodes`` while a transaction is open, then
    strips the safe roots of the collected set when the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # an explicit falsy topic falls back to the default backup topic
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue additional nodes to be stripped at transaction close"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
385
385
386
386
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction open: strip immediately
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
409
409
410
410
def stripmanifest(repo, striprev, tr, files):
    """Strip every manifest revlog (flat and tree) at ``striprev``."""
    for revlog in manifestrevlogs(repo):
        revlog.strip(striprev, tr)
414
414
415
415
def manifestrevlogs(repo):
    """Yield the root manifest storage, plus every tree manifest storage
    when the repository uses treemanifests."""
    yield repo.manifestlog.getstorage(b'')
    if b'treemanifest' in repo.requirements:
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for unencoded, encoded, size in repo.store.datafiles():
            if unencoded.startswith(b'meta/') and unencoded.endswith(
                b'00manifest.i'
            ):
                # strip the b'meta/' prefix and b'/00manifest.i' suffix
                # (renamed from `dir` to avoid shadowing the builtin)
                treedir = unencoded[5:-12]
                yield repo.manifestlog.getstorage(treedir)
427
427
428
428
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if b'fncache' not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        progress = ui.makeprogress(
            _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
        )
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = b'data/%s.i' % f
                d = b'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if b'treemanifest' in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in util.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
507
507
508
508
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    # (loop variable renamed from `bytes` to avoid shadowing the builtin)
    for data in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now