undo-files: also remove the undo.backupfiles...
marmoute
r51186:cd680b45 stable
@@ -1,565 +1,568 @@
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Olivia Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    bundle2,
    changegroup,
    discovery,
    error,
    exchange,
    obsolete,
    obsutil,
    pathutil,
    phases,
    requirements,
    scmutil,
    util,
)
from .utils import (
    hashutil,
    stringutil,
    urlutil,
)


def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup"""

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )


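# Example (editor's sketch, not part of this change): the backup name above
# is made unique by hashing the sorted hex ids of every bundled revision, so
# the same revision set always yields the same filename. The hashing pattern
# with only the standard library:
import hashlib

def backup_suffix(hex_nodes):
    # sort first so the digest does not depend on traversal order
    digest = hashlib.sha1(b''.join(sorted(hex_nodes))).digest()
    return digest[:4].hex()  # 4 bytes -> 8 hex chars, as in the name above

assert backup_suffix([b'a' * 40, b'b' * 40]) == backup_suffix([b'b' * 40, b'a' * 40])
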
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in range(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)


def _collectrevlog(revlog, striprev):
    _, brokenset = revlog.getstrippoint(striprev)
    return [revlog.linkrev(r) for r in brokenset]


def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()

    for revlog in manifestrevlogs(repo):
        s.update(_collectrevlog(revlog, striprev))
    for fname in files:
        s.update(_collectrevlog(repo.file(fname), striprev))

    return s


def cleanup_undo_files(repo):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback from running in situations where it
    does not make sense, for example after a strip.
    """
-   for undovfs, undofile in repo.undofiles():
+   # XXX need to remove the backups themselves too
+   undo_files = [(repo.svfs, b'undo.backupfiles')]
+   undo_files.extend(repo.undofiles())
+   for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                repo.ui.warn(msg)


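# Example (editor's sketch, not part of this change): the loop above is a
# best-effort delete -- a file that is already gone is fine, while any other
# failure is reported instead of being silently swallowed. The bare pattern:
import errno
import os

def unlink_if_exists(path):
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:  # ENOENT means "already gone": ignore
            raise  # repair.py warns here instead of re-raising
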
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touch the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

                deleteobsmarkers(repo.obsstore, stripobsidx)
                del repo.obsstore
                repo.invalidatevolatilesets()
                repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            cleanup_undo_files(repo)

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile


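# Example (editor's sketch, not part of this change): the journal replay in
# the strip transaction above assumes append-only storage, so undoing an
# append is just truncating back to the offset recorded before it. The core
# pattern with only the standard library:
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as fp:
    fp.write(b'committed data')
    path = fp.name
offset = os.path.getsize(path)  # journal: remember the pre-append size
with open(path, 'ab') as fp:
    fp.write(b'data to strip')
with open(path, 'ab') as fp:
    fp.truncate(offset)  # "strip": cut the file back to the recorded offset
with open(path, 'rb') as fp:
    assert fp.read() == b'committed data'
os.unlink(path)
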
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase"""
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile


def _bookmarkmovements(repo, tostrip):
    # compute necessary bookmark movement
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)
    newbmtarget = None
    # If we need to move bookmarks, compute bookmark
    # targets. Otherwise we can skip doing this logic.
    if updatebm:
        # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
        # but is much faster
        newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
        if newbmtarget:
            newbmtarget = repo[newbmtarget.first()].node()
        else:
            newbmtarget = b'.'
    return newbmtarget, updatebm


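# Example (editor's sketch, not part of this change): on a toy DAG, the
# revset above -- max(parents(s) - s) -- picks the highest-numbered parent
# of the stripped set that is itself outside the set:
parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [3]}
tostrip = {3, 4}
candidates = {p for r in tostrip for p in parents[r]} - tostrip
assert max(candidates) == 1  # bookmarks on 3 or 4 would land on revision 1
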
def _createstripbackup(repo, stripbases, node, topic):
    # backup the changesets we are about to strip
    vfs = repo.vfs
    cl = repo.changelog
    backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
    repo.ui.log(
        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
    )
    return backupfile


def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]


class stripcallback:
    """used as a transaction postclose callback"""

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)


def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside a transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabetical order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)


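# Example (editor's sketch, not part of this change): assuming postclose
# callbacks fire in sorted key order, as the comment above says, a b'\xff'
# prefix (the highest byte value) sorts after any ASCII-named callback:
assert sorted([b'\xffstrip', b'aaa', b'zzz']) == [b'aaa', b'zzz', b'\xffstrip']
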
def stripmanifest(repo, striprev, tr, files):
    for revlog in manifestrevlogs(repo):
        revlog.strip(striprev, tr)


def manifestrevlogs(repo):
    yield repo.manifestlog.getstorage(b'')
    if scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for t, unencoded, size in repo.store.datafiles():
            if unencoded.startswith(b'meta/') and unencoded.endswith(
                b'00manifest.i'
            ):
                dir = unencoded[5:-12]
                yield repo.manifestlog.getstorage(dir)


def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d file,
            # instead of reading the full changelog.
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

            if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
                # This logic is safe if treemanifest isn't enabled, but also
                # pointless, so we skip it if treemanifest isn't enabled.
                for dir in pathutil.dirs(seenfiles):
                    i = b'meta/%s/00manifest.i' % dir
                    d = b'meta/%s/00manifest.d' % dir

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))


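# Example (editor's sketch, not part of this change): the add/remove
# accounting above is plain set arithmetic -- entries only in the rebuilt
# set get added, entries only in the old set get removed:
oldentries = {b'data/a.i', b'data/stale.i'}
newentries = {b'data/a.i', b'data/b.i'}
assert newentries - oldentries == {b'data/b.i'}      # reported as "adding"
assert oldentries - newentries == {b'data/stale.i'}  # reported as "removing"
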
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want removed. A new temporary file is created,
    the remaining markers are written there, and on .close() that file is
    atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
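
# Example (editor's sketch, not part of this change): the atomictemp write
# above is the classic "write a sibling temp file, then rename it over the
# original" pattern; os.replace makes the final swap atomic:
import os
import tempfile

def rewrite_atomically(path, data):
    dirname = os.path.dirname(path) or '.'
    # create the temp file next to the target so the rename stays on one
    # filesystem (a cross-device rename would not be atomic)
    fd, tmppath = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, 'wb') as fp:
            fp.write(data)
        os.replace(tmppath, path)  # readers see the old or new file, never half
    except BaseException:
        os.unlink(tmppath)
        raise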