# Review artifact header removed: this is mercurial's repair.py as of
# changeset r51380 ("store: use StoreEntry API instead of parsing filename
# when listing manifestlog", by marmoute).
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Olivia Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    bundle2,
    changegroup,
    discovery,
    error,
    exchange,
    obsolete,
    obsutil,
    pathutil,
    phases,
    requirements,
    scmutil,
    store,
    transaction,
    util,
)
from .utils import (
    hashutil,
    urlutil,
)
34
35
35
36
def backupbundle(
    repo,
    bases,
    heads,
    node,
    suffix,
    compress=True,
    obsolescence=True,
    tmp_backup=False,
):
    """create a bundle with the specified revisions as a backup

    The bundle is written under .hg/strip-backup/ with a name derived from
    `node`, a hash of all included changesets, and `suffix`. Returns the
    path of the written bundle (relative to repo.vfs).
    """

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
        allow_internal=tmp_backup,
    )
93
94
94
95
95 def _collectfiles(repo, striprev):
96 def _collectfiles(repo, striprev):
96 """find out the filelogs affected by the strip"""
97 """find out the filelogs affected by the strip"""
97 files = set()
98 files = set()
98
99
99 for x in range(striprev, len(repo)):
100 for x in range(striprev, len(repo)):
100 files.update(repo[x].files())
101 files.update(repo[x].files())
101
102
102 return sorted(files)
103 return sorted(files)
103
104
104
105
105 def _collectrevlog(revlog, striprev):
106 def _collectrevlog(revlog, striprev):
106 _, brokenset = revlog.getstrippoint(striprev)
107 _, brokenset = revlog.getstrippoint(striprev)
107 return [revlog.linkrev(r) for r in brokenset]
108 return [revlog.linkrev(r) for r in brokenset]
108
109
109
110
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()

    # manifest revlogs first, then the filelog of every affected file
    for revlog in manifestrevlogs(repo):
        s.update(_collectrevlog(revlog, striprev))
    for fname in files:
        s.update(_collectrevlog(repo.file(fname), striprev))

    return s
120
121
121
122
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """remove the changesets in `nodelist` (and their descendants) from repo

    Revisions above the strip point that are not descendants of the stripped
    set are bundled up and re-applied afterwards. Returns the path to the
    backup bundle (or None when `backup` is False).
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            tostrip.add(r)

        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
            tmp_backup=True,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            transaction.cleanup_undo_files(repo.ui.warn, repo.vfs_map)

        except:  # re-raises
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
301
302
302
303
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    Instead of removing data, the changesets are retracted to the archived
    phase. Returns the backup bundle path, or None when there is nothing to
    strip or `backup` is False.
    """
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
320
321
321
322
322 def _bookmarkmovements(repo, tostrip):
323 def _bookmarkmovements(repo, tostrip):
323 # compute necessary bookmark movement
324 # compute necessary bookmark movement
324 bm = repo._bookmarks
325 bm = repo._bookmarks
325 updatebm = []
326 updatebm = []
326 for m in bm:
327 for m in bm:
327 rev = repo[bm[m]].rev()
328 rev = repo[bm[m]].rev()
328 if rev in tostrip:
329 if rev in tostrip:
329 updatebm.append(m)
330 updatebm.append(m)
330 newbmtarget = None
331 newbmtarget = None
331 # If we need to move bookmarks, compute bookmark
332 # If we need to move bookmarks, compute bookmark
332 # targets. Otherwise we can skip doing this logic.
333 # targets. Otherwise we can skip doing this logic.
333 if updatebm:
334 if updatebm:
334 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
335 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
335 # but is much faster
336 # but is much faster
336 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
337 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
337 if newbmtarget:
338 if newbmtarget:
338 newbmtarget = repo[newbmtarget.first()].node()
339 newbmtarget = repo[newbmtarget.first()].node()
339 else:
340 else:
340 newbmtarget = b'.'
341 newbmtarget = b'.'
341 return newbmtarget, updatebm
342 return newbmtarget, updatebm
342
343
343
344
def _createstripbackup(repo, stripbases, node, topic):
    """backup the changesets we are about to strip

    Returns the backup bundle path, or None when there is nothing worth
    backing up (e.g. only internal changesets would be affected).
    """
    vfs = repo.vfs
    unfi = repo.unfiltered()
    to_node = unfi.changelog.node
    # internal changeset are internal implementation details that should not
    # leave the repository and not be exposed to the users. In addition feature
    # using them requires to be resistant to strip. See test case for more
    # details.
    all_backup = unfi.revs(
        b"(%ln)::(%ld) and not _internal()",
        stripbases,
        unfi.changelog.headrevs(),
    )
    if not all_backup:
        return None

    def to_nodes(revs):
        return [to_node(r) for r in revs]

    bases = to_nodes(unfi.revs("roots(%ld)", all_backup))
    heads = to_nodes(unfi.revs("heads(%ld)", all_backup))
    backupfile = backupbundle(repo, bases, heads, node, topic)
    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
    repo.ui.log(
        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
    )
    return backupfile
372
373
373
374
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes"""
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
392
393
393
394
class stripcallback:
    """used as a transaction postclose callback

    Accumulates nodes via addnodes() during the transaction; on postclose,
    strips the safe roots of everything accumulated.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        # fall back to the default topic when none was given
        self.topic = topic or b'backup'
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes to be stripped when the transaction closes"""
        self.nodelist.extend(nodes)

    def __call__(self, tr):
        roots = safestriproots(self.ui, self.repo, self.nodelist)
        if roots:
            strip(self.ui, self.repo, roots, self.backup, self.topic)
411
412
412
413
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
435
436
436
437
def stripmanifest(repo, striprev, tr, files):
    """strip revision striprev and above from every manifest revlog"""
    for revlog in manifestrevlogs(repo):
        revlog.strip(striprev, tr)
440
441
441
442
def manifestrevlogs(repo):
    """yield every manifest revlog: the root one, then (for treemanifest
    repositories) one per sub-directory tree."""
    yield repo.manifestlog.getstorage(b'')
    if scmutil.istreemanifest(repo):
        # This logic is safe if treemanifest isn't enabled, but also
        # pointless, so we skip it if treemanifest isn't enabled.
        for entry in repo.store.datafiles():
            # rely on the StoreEntry API instead of parsing file names
            if not entry.is_revlog:
                continue
            if not entry.revlog_type == store.FILEFLAGS_MANIFESTLOG:
                continue
            if entry.is_revlog_main:
                yield repo.manifestlog.getstorage(entry.target_id)
455
455
456
456
def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d files,
            # instead of reading the full changelog
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
547
547
548
548
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in indices:
            n += 1
            continue
        left.append(m)

    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
# (review page footer removed)