##// END OF EJS Templates
undo-files: extract the cleanup code from strip in a function...
marmoute -
r51184:80110176 stable
parent child Browse files
Show More
@@ -1,561 +1,565 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Olivia Mackall
4 # Copyright 2007 Olivia Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import errno
10 import errno
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 short,
15 short,
16 )
16 )
17 from . import (
17 from . import (
18 bundle2,
18 bundle2,
19 changegroup,
19 changegroup,
20 discovery,
20 discovery,
21 error,
21 error,
22 exchange,
22 exchange,
23 obsolete,
23 obsolete,
24 obsutil,
24 obsutil,
25 pathutil,
25 pathutil,
26 phases,
26 phases,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 hashutil,
32 hashutil,
33 stringutil,
33 stringutil,
34 urlutil,
34 urlutil,
35 )
35 )
36
36
37
37
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup

    The bundle is written under ``.hg/strip-backup`` and its name embeds a
    short hash of every bundled node so that distinct strips never collide.
    Returns the path of the written bundle.
    """
    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    # Pick the container format: HG20 whenever the repo supports a
    # changegroup version newer than '01', otherwise plain HG10.
    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion == b'01':
        bundletype = b"HG10BZ" if compress else b"HG10UN"
    else:
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
87
87
88
88
89 def _collectfiles(repo, striprev):
89 def _collectfiles(repo, striprev):
90 """find out the filelogs affected by the strip"""
90 """find out the filelogs affected by the strip"""
91 files = set()
91 files = set()
92
92
93 for x in range(striprev, len(repo)):
93 for x in range(striprev, len(repo)):
94 files.update(repo[x].files())
94 files.update(repo[x].files())
95
95
96 return sorted(files)
96 return sorted(files)
97
97
98
98
99 def _collectrevlog(revlog, striprev):
99 def _collectrevlog(revlog, striprev):
100 _, brokenset = revlog.getstrippoint(striprev)
100 _, brokenset = revlog.getstrippoint(striprev)
101 return [revlog.linkrev(r) for r in brokenset]
101 return [revlog.linkrev(r) for r in brokenset]
102
102
103
103
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()
    # Manifest revlogs first, then every filelog named in ``files``.
    for rl in manifestrevlogs(repo):
        broken.update(_collectrevlog(rl, striprev))
    for fname in files:
        broken.update(_collectrevlog(repo.file(fname), striprev))
    return broken
114
114
115
115
def cleanup_undo_files(repo):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situation were it does not
    make sense. For example after a strip.
    """
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as err:
            # An already-missing undo file is fine; warn on anything else.
            if err.errno == errno.ENOENT:
                continue
            warning = _(b'error removing %s: %s\n') % (
                undovfs.join(undofile),
                stringutil.forcebytestr(err),
            )
            repo.ui.warn(warning)
130
131
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """strip revisions rooted at ``nodelist`` from the repository

    Revisions above the stripped ones that are NOT descendants of them are
    preserved: they are bundled to a temporary file and re-applied after
    the revlogs are truncated.  If ``backup`` is true, the stripped
    revisions themselves are saved to a ``strip-backup`` bundle whose path
    is returned (None otherwise).
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    # base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip.  This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                # Truncate every file the transaction touched back to its
                # pre-transaction offset; an offset of 0 means the file no
                # longer holds any data and is dropped from the store map.
                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files: rollback makes no sense after a strip
            cleanup_undo_files(repo)

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
306
310
307
311
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    Instead of truncating revlogs, the targeted changesets (and their
    descendants) are moved to the archived phase.  Returns the backup
    bundle path, or None when nothing was stripped or no backup was made.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    if backup:
        backupfile = _createstripbackup(repo, tostrip, tostrip[0], topic)
    else:
        backupfile = None

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
325
329
326
330
327 def _bookmarkmovements(repo, tostrip):
331 def _bookmarkmovements(repo, tostrip):
328 # compute necessary bookmark movement
332 # compute necessary bookmark movement
329 bm = repo._bookmarks
333 bm = repo._bookmarks
330 updatebm = []
334 updatebm = []
331 for m in bm:
335 for m in bm:
332 rev = repo[bm[m]].rev()
336 rev = repo[bm[m]].rev()
333 if rev in tostrip:
337 if rev in tostrip:
334 updatebm.append(m)
338 updatebm.append(m)
335 newbmtarget = None
339 newbmtarget = None
336 # If we need to move bookmarks, compute bookmark
340 # If we need to move bookmarks, compute bookmark
337 # targets. Otherwise we can skip doing this logic.
341 # targets. Otherwise we can skip doing this logic.
338 if updatebm:
342 if updatebm:
339 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
343 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
340 # but is much faster
344 # but is much faster
341 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
345 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
342 if newbmtarget:
346 if newbmtarget:
343 newbmtarget = repo[newbmtarget.first()].node()
347 newbmtarget = repo[newbmtarget.first()].node()
344 else:
348 else:
345 newbmtarget = b'.'
349 newbmtarget = b'.'
346 return newbmtarget, updatebm
350 return newbmtarget, updatebm
347
351
348
352
def _createstripbackup(repo, stripbases, node, topic):
    """Write a backup bundle of the changesets about to be stripped.

    The bundle spans ``stripbases`` up to the current changelog heads;
    its path (relative to ``repo.vfs``) is announced to the user, logged,
    and returned.
    """
    vfs = repo.vfs
    heads = repo.changelog.heads()
    backupfile = backupbundle(repo, stripbases, heads, node, topic)
    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
    repo.ui.log(
        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
    )
    return backupfile
359
363
360
364
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    wanted = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, wanted, wanted, phases.internal, wanted))
    skipped = wanted - tostrip
    if skipped:
        # Some requested revisions would orphan descendants; warn and keep
        # them.
        shorts = sorted(short(repo[r].node()) for r in skipped)
        msg = _(b'warning: orphaned descendants detected, not stripping %s\n')
        ui.warn(msg % b', '.join(shorts))
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
379
383
380
384
class stripcallback:
    """used as a transaction postclose callback

    Accumulates nodes during a transaction and strips the safe subset of
    them once the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was supplied
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes to be stripped at transaction close"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        safe = safestriproots(self.ui, self.repo, self.nodelist)
        if not safe:
            return
        strip(self.ui, self.repo, safe, self.backup, self.topic)
398
402
399
403
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irreverent revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # No transaction running: strip right away.
        return strip(
            ui, repo, safestriproots(ui, repo, nodelist), backup=backup, topic=topic
        )
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
422
426
423
427
def stripmanifest(repo, striprev, tr, files):
    """strip every manifest revlog at ``striprev`` within transaction ``tr``"""
    for mrl in manifestrevlogs(repo):
        mrl.strip(striprev, tr)
427
431
428
432
def manifestrevlogs(repo):
    """yield every manifest revlog: the root one, then tree sub-manifests"""
    yield repo.manifestlog.getstorage(b'')
    if not scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        return
    for t, unencoded, size in repo.store.datafiles():
        is_tree_mf = unencoded.startswith(b'meta/') and unencoded.endswith(
            b'00manifest.i'
        )
        if is_tree_mf:
            # strip the b'meta/' prefix and b'/00manifest.i' suffix
            yield repo.manifestlog.getstorage(unencoded[5:-12])
440
444
441
445
def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    With ``only_data`` the .i entries already listed in the fncache are
    trusted and only the matching .d files are re-checked, which avoids
    reading the full changelog.
    """
    repo = repo.unfiltered()

    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d files,
            # instead of reading the full changelog
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            # Walk every changeset and record the .i/.d store paths of each
            # file it touches.
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache
            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
532
536
533
537
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    kept = []
    deleted = 0
    for idx, marker in enumerate(obsstore._all):
        if idx in indices:
            deleted += 1
        else:
            kept.append(marker)

    # atomictemp guarantees the rename-into-place is all-or-nothing
    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for data in obsolete.encodemarkers(kept, True, obsstore._version):
        newobsstorefile.write(data)
    newobsstorefile.close()
    return deleted
General Comments 0
You need to be logged in to leave comments. Login now