##// END OF EJS Templates
softstrip: fix a reference to an undefined variable...
Martin von Zweigbergk -
r47265:cd915434 default
parent child Browse files
Show More
@@ -1,545 +1,546 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 short,
16 short,
17 )
17 )
18 from . import (
18 from . import (
19 bundle2,
19 bundle2,
20 changegroup,
20 changegroup,
21 discovery,
21 discovery,
22 error,
22 error,
23 exchange,
23 exchange,
24 obsolete,
24 obsolete,
25 obsutil,
25 obsutil,
26 pathutil,
26 pathutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 requirements,
29 requirements,
30 scmutil,
30 scmutil,
31 util,
31 util,
32 )
32 )
33 from .utils import (
33 from .utils import (
34 hashutil,
34 hashutil,
35 stringutil,
35 stringutil,
36 )
36 )
37
37
38
38
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup

    The bundle is written into the repository's ``strip-backup`` directory
    (created on demand under ``repo.vfs``) and the generated file name is
    returned.  ``node`` and ``suffix`` only influence the file name;
    ``compress`` selects BZ compression and ``obsolescence`` controls whether
    obsolescence markers are bundled.
    """

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    # Choose the bundle container: HG20 when the repo's changegroup version
    # allows it (compression becomes a bundle2 parameter), otherwise fall
    # back to the legacy HG10 formats where compression is part of the type.
    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
88
88
89
89
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    # Union of the files touched by every changeset at or above striprev.
    touched = {
        fname
        for rev in pycompat.xrange(striprev, len(repo))
        for fname in repo[rev].files()
    }
    return sorted(touched)
98
98
99
99
def _collectrevlog(revlog, striprev):
    """return the changelog revisions this revlog would leave broken

    getstrippoint() reports which revisions of this revlog cannot survive a
    strip at ``striprev``; map each one back to its changelog revision.
    """
    brokenset = revlog.getstrippoint(striprev)[1]
    return [revlog.linkrev(rev) for rev in brokenset]
103
103
104
104
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()
    # Manifest revlogs first (root manifest plus any tree manifests)...
    for mrevlog in manifestrevlogs(repo):
        broken.update(_collectrevlog(mrevlog, striprev))
    # ...then every filelog named in ``files``.
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
115
115
116
116
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """strip the revisions in ``nodelist`` (and their descendants) from repo

    Returns the path of the backup bundle, or None when ``backup`` is falsy.
    ``topic`` names the backup bundle file.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        # Any child of a stripped revision is stripped too.
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            # r supersedes its parents as a head of the saved set.
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                # Truncate every store file the strip touched back to the
                # offset the transaction recorded; files truncated to zero
                # are removed from the store entirely.
                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # Re-apply the changesets we saved aside before truncation.
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                if not repo.ui.verbose:
                    # silence internal shuffling chatter
                    repo.ui.pushbuffer()
                tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                txnname = b'strip'
                if not isinstance(gen, bundle2.unbundle20):
                    txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
                with repo.transaction(txnname) as tr:
                    bundle2.applybundle(
                        repo, gen, tr, source=b'strip', url=tmpbundleurl
                    )
                if not repo.ui.verbose:
                    repo.ui.popbuffer()
                f.close()

            # Move bookmarks that pointed into the stripped set.
            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(
                            _(b'error removing %s: %s\n')
                            % (
                                undovfs.join(undofile),
                                stringutil.forcebytestr(e),
                            )
                        )

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
303
303
304
304
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    The changesets in ``nodelist`` and all their descendants are moved to
    the archived phase instead of being physically removed.  Returns the
    path of the backup bundle, or None when ``backup`` is falsy or nothing
    needed stripping.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    # backupfile stays None when no backup is requested (the fix in this
    # commit: it used to be undefined on the backup=False path).
    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
321
322
322
323
def _bookmarkmovements(repo, tostrip):
    """return (new target node, bookmark names) for bookmarks to relocate

    A bookmark needs to move when the revision it points at is in
    ``tostrip``.  The target is None when no bookmark moves.
    """
    bm = repo._bookmarks
    updatebm = [name for name in bm if repo[bm[name]].rev() in tostrip]

    newbmtarget = None
    # If we need to move bookmarks, compute bookmark
    # targets. Otherwise we can skip doing this logic.
    if updatebm:
        # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
        # but is much faster
        candidates = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
        if candidates:
            newbmtarget = repo[candidates.first()].node()
        else:
            newbmtarget = b'.'
    return newbmtarget, updatebm
343
344
344
345
def _createstripbackup(repo, stripbases, node, topic):
    """write a backup bundle for the changesets about to be stripped

    Announces the backup location on the ui and in the log, then returns
    the backup file path.
    """
    vfs = repo.vfs
    changelog = repo.changelog
    backupfile = backupbundle(repo, stripbases, changelog.heads(), node, topic)
    location = vfs.join(backupfile)
    repo.ui.status(_(b"saved backup bundle to %s\n") % location)
    repo.ui.log(b"backupbundle", b"saved backup bundle to %s\n", location)
    return backupfile
355
356
356
357
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    # Internal-phase changesets are excluded from the "orphaned" computation.
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    # Warn about requested revisions we refuse to strip because a
    # descendant outside the requested set would be orphaned.
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
375
376
376
377
class stripcallback(object):
    """used as a transaction postclose callback

    Accumulates nodes via addnodes() while a transaction is open and strips
    the safe roots of the collected set once the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes for stripping at transaction close"""
        self.nodelist += nodes

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if not roots:
            return
        strip(self.ui, self.repo, roots, self.backup, self.topic)
394
395
395
396
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction in flight: strip right away.
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # A transaction is open: accumulate nodes on a single shared callback
    # that runs after the transaction closes.
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
418
419
419
420
def stripmanifest(repo, striprev, tr, files):
    """strip every manifest revlog at ``striprev`` within transaction ``tr``

    ``files`` is accepted for interface compatibility but unused here.
    """
    for mfrevlog in manifestrevlogs(repo):
        mfrevlog.strip(striprev, tr)
423
424
424
425
def manifestrevlogs(repo):
    """yield the root manifest storage, then any tree-manifest storages"""
    yield repo.manifestlog.getstorage(b'')
    if scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        prefix = b'meta/'
        suffix = b'00manifest.i'
        for unencoded, encoded, size in repo.store.datafiles():
            if unencoded.startswith(prefix) and unencoded.endswith(suffix):
                # slice out the tree directory (keeps its trailing slash)
                tree = unencoded[len(prefix) : -len(suffix)]
                yield repo.manifestlog.getstorage(tree)
436
437
437
438
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if b'fncache' not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        # Walk every changeset, collecting the data files that actually
        # exist in the store for each touched file.
        progress = ui.makeprogress(
            _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
        )
        for rev in repo:
            progress.update(rev)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                i = b'data/%s.i' % f
                d = b'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            # Persist the rebuilt cache in its own transaction.
            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
516
517
517
518
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    current = obsstore._all
    # keep every marker whose position was not selected for deletion
    kept = [m for i, m in enumerate(current) if i not in indices]
    deleted = len(current) - len(kept)

    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for chunk in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(chunk)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now