@@ -1,574 +1,571 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Olivia Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 from .i18n import _
 from .node import (
     hex,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     discovery,
     error,
     exchange,
     obsolete,
     obsutil,
     pathutil,
     phases,
     requirements,
     scmutil,
-    store,
     transaction,
     util,
 )
 from .utils import (
     hashutil,
     urlutil,
 )


 def backupbundle(
     repo,
     bases,
     heads,
     node,
     suffix,
     compress=True,
     obsolescence=True,
     tmp_backup=False,
 ):
     """create a bundle with the specified revisions as a backup"""

     backupdir = b"strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashutil.sha1(b''.join(allhashes)).digest()
     name = b"%s/%s-%s-%s.hg" % (
         backupdir,
         short(node),
         hex(totalhash[:4]),
         suffix,
     )

     cgversion = changegroup.localversion(repo)
     comp = None
     if cgversion != b'01':
         bundletype = b"HG20"
         if compress:
             comp = b'BZ'
     elif compress:
         bundletype = b"HG10BZ"
     else:
         bundletype = b"HG10UN"

     outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
     contentopts = {
         b'cg.version': cgversion,
         b'obsolescence': obsolescence,
         b'phases': True,
     }
     return bundle2.writenewbundle(
         repo.ui,
         repo,
         b'strip',
         name,
         bundletype,
         outgoing,
         contentopts,
         vfs,
         compression=comp,
         allow_internal=tmp_backup,
     )


 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in range(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)


 def _collectrevlog(revlog, striprev):
     _, brokenset = revlog.getstrippoint(striprev)
     return [revlog.linkrev(r) for r in brokenset]


 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()

     for revlog in manifestrevlogs(repo):
         s.update(_collectrevlog(revlog, striprev))
     for fname in files:
         s.update(_collectrevlog(repo.file(fname), striprev))

     return s


 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
     # This function requires the caller to lock the repo, but it operates
     # within a transaction of its own, and thus requires there to be no current
     # transaction when it is called.
     if repo.currenttransaction() is not None:
         raise error.ProgrammingError(b'cannot strip from inside a transaction')

     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in [b'none', b'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()
     vfs = repo.vfs
     # load bookmark before changelog to avoid side effect from outdated
     # changelog (see repo._refreshchangelog)
     repo._bookmarks
     cl = repo.changelog

     # TODO handle undo of merge sets
     if isinstance(nodelist, bytes):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     saveheads = set(saverevs)
     for r in cl.revs(start=striprev + 1):
         if any(p in tostrip for p in cl.parentrevs(r)):
             tostrip.add(r)

         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     stripobsidx = obsmarkers = ()
     if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
         obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
     if obsmarkers:
         stripobsidx = [
             i for i, m in enumerate(repo.obsstore) if m in obsmarkers
         ]

     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

     backupfile = None
     node = nodelist[-1]
     if backup:
         backupfile = _createstripbackup(repo, stripbases, node, topic)
     # create a changegroup for all the branches we need to keep
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         #
         # We do not include obsolescence, it might re-introduce prune markers
         # we are trying to strip. This is harmless since the stripped markers
         # are already backed up and we did not touched the markers for the
         # saved changesets.
         tmpbundlefile = backupbundle(
             repo,
             savebases,
             saveheads,
             node,
             b'temp',
             compress=False,
             obsolescence=False,
             tmp_backup=True,
         )

     with ui.uninterruptible():
         try:
             with repo.transaction(b"strip") as tr:
                 # TODO this code violates the interface abstraction of the
                 # transaction and makes assumptions that file storage is
                 # using append-only files. We'll need some kind of storage
                 # API to handle stripping for us.
                 oldfiles = set(tr._offsetmap.keys())
                 oldfiles.update(tr._newfiles)

                 tr.startgroup()
                 cl.strip(striprev, tr)
                 stripmanifest(repo, striprev, tr, files)

                 for fn in files:
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()

                 entries = tr.readjournal()

                 for file, troffset in entries:
                     if file in oldfiles:
                         continue
                     with repo.svfs(file, b'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
                         repo.store.markremoved(file)

             deleteobsmarkers(repo.obsstore, stripobsidx)
             del repo.obsstore
             repo.invalidatevolatilesets()
             repo._phasecache.filterunknown(repo)

             if tmpbundlefile:
                 ui.note(_(b"adding branch\n"))
                 f = vfs.open(tmpbundlefile, b"rb")
                 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                 # silence internal shuffling chatter
                 maybe_silent = (
                     repo.ui.silent()
                     if not repo.ui.verbose
                     else util.nullcontextmanager()
                 )
                 with maybe_silent:
                     tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                     txnname = b'strip'
                     if not isinstance(gen, bundle2.unbundle20):
                         txnname = b"strip\n%s" % urlutil.hidepassword(
                             tmpbundleurl
                         )
                     with repo.transaction(txnname) as tr:
                         bundle2.applybundle(
                             repo, gen, tr, source=b'strip', url=tmpbundleurl
                         )
                 f.close()

             with repo.transaction(b'repair') as tr:
                 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                 repo._bookmarks.applychanges(repo, tr, bmchanges)

             transaction.cleanup_undo_files(repo.ui.warn, repo.vfs_map)

         except:  # re-raises
             if backupfile:
                 ui.warn(
                     _(b"strip failed, backup bundle stored in '%s'\n")
                     % vfs.join(backupfile)
                 )
             if tmpbundlefile:
                 ui.warn(
                     _(b"strip failed, unrecovered changes stored in '%s'\n")
                     % vfs.join(tmpbundlefile)
                 )
                 ui.warn(
                     _(
                         b"(fix the problem, then recover the changesets with "
                         b"\"hg unbundle '%s'\")\n"
                     )
                     % vfs.join(tmpbundlefile)
                 )
             raise
         else:
             if tmpbundlefile:
                 # Remove temporary bundle only if there were no exceptions
                 vfs.unlink(tmpbundlefile)

     repo.destroyed()
     # return the backup file path (or None if 'backup' was False) so
     # extensions can use it
     return backupfile


 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
     """perform a "soft" strip using the archived phase"""
     tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
     if not tostrip:
         return None

     backupfile = None
     if backup:
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)

     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
     with repo.transaction(b'strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
         repo._bookmarks.applychanges(repo, tr, bmchanges)
     return backupfile


 def _bookmarkmovements(repo, tostrip):
     # compute necessary bookmark movement
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
     newbmtarget = None
     # If we need to move bookmarks, compute bookmark
     # targets. Otherwise we can skip doing this logic.
     if updatebm:
         # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
         # but is much faster
         newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
         if newbmtarget:
             newbmtarget = repo[newbmtarget.first()].node()
         else:
             newbmtarget = b'.'
     return newbmtarget, updatebm


 def _createstripbackup(repo, stripbases, node, topic):
     # backup the changeset we are about to strip
     vfs = repo.vfs
     unfi = repo.unfiltered()
     to_node = unfi.changelog.node
     # internal changeset are internal implementation details that should not
     # leave the repository and not be exposed to the users. In addition feature
     # using them requires to be resistant to strip. See test case for more
     # details.
     all_backup = unfi.revs(
         b"(%ln)::(%ld) and not _internal()",
         stripbases,
         unfi.changelog.headrevs(),
     )
     if not all_backup:
         return None

     def to_nodes(revs):
         return [to_node(r) for r in revs]

     bases = to_nodes(unfi.revs("roots(%ld)", all_backup))
     heads = to_nodes(unfi.revs("heads(%ld)", all_backup))
     backupfile = backupbundle(repo, bases, heads, node, topic)
     repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
     repo.ui.log(
         b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
     )
     return backupfile


 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
     revs = {torev(n) for n in nodes}
     # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
     revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
     tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
         nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
         ui.warn(
             _(b'warning: orphaned descendants detected, not stripping %s\n')
             % nodestr
         )
     return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]


 class stripcallback:
     """used as a transaction postclose callback"""

     def __init__(self, ui, repo, backup, topic):
         self.ui = ui
         self.repo = repo
         self.backup = backup
         self.topic = topic or b'backup'
         self.nodelist = []

     def addnodes(self, nodes):
         self.nodelist.extend(nodes)

     def __call__(self, tr):
         roots = safestriproots(self.ui, self.repo, self.nodelist)
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)


 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irreverent revs

     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.

     Will do a backup if `backup` is True. The last non-None "topic" will be
     used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
         return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabet order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose(b'\xffstrip')
     if callback is None:
         callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose(b'\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)


 def stripmanifest(repo, striprev, tr, files):
     for revlog in manifestrevlogs(repo):
         revlog.strip(striprev, tr)


 def manifestrevlogs(repo):
     yield repo.manifestlog.getstorage(b'')
     if scmutil.istreemanifest(repo):
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for entry in repo.store.data_entries():
-            if not entry.is_revlog:
-                continue
-            if entry.revlog_type == store.FILEFLAGS_MANIFESTLOG:
+            if entry.is_revlog and entry.is_manifestlog:
                 yield repo.manifestlog.getstorage(entry.target_id)


 def rebuildfncache(ui, repo, only_data=False):
     """Rebuilds the fncache file from repo history.

     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()

     if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
         ui.warn(
             _(
                 b'(not rebuilding fncache because repository does not '
                 b'support fncache)\n'
             )
         )
         return

     with repo.lock():
         fnc = repo.store.fncache
         fnc.ensureloaded(warn=ui.warn)

         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()

         if only_data:
             # Trust the listing of .i from the fncache, but not the .d. This is
             # much faster, because we only need to stat every possible .d files,
             # instead of reading the full changelog
             for f in fnc:
                 if f[:5] == b'data/' and f[-2:] == b'.i':
                     seenfiles.add(f[5:-2])
                     newentries.add(f)
                     dataf = f[:-2] + b'.d'
                     if repo.store._exists(dataf):
                         newentries.add(dataf)
         else:
             progress = ui.makeprogress(
                 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
             )
             for rev in repo:
                 progress.update(rev)

                 ctx = repo[rev]
                 for f in ctx.files():
                     # This is to minimize I/O.
                     if f in seenfiles:
                         continue
                     seenfiles.add(f)

                     i = b'data/%s.i' % f
                     d = b'data/%s.d' % f

                     if repo.store._exists(i):
                         newentries.add(i)
                     if repo.store._exists(d):
                         newentries.add(d)

             progress.complete()

         if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in pathutil.dirs(seenfiles):
                 i = b'meta/%s/00manifest.i' % dir
                 d = b'meta/%s/00manifest.d' % dir

                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)

         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_(b'removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_(b'adding %s\n') % p)

         if addcount or removecount:
             ui.write(
                 _(b'%d items added, %d removed from fncache\n')
                 % (addcount, removecount)
             )
             fnc.entries = newentries
             fnc._dirty = True

             with repo.transaction(b'fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_(b'fncache already up to date\n'))


 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted

     'indices' is a list of ints which are the indices
     of the markers to be deleted.

     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return

     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)

     newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
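
The only functional hunks in this diff are the dropped import of store and the rewritten filter inside manifestrevlogs(). A minimal sketch of why the two filters are equivalent, assuming (as the new side implies) that a store entry's is_manifestlog property encapsulates the old revlog_type == store.FILEFLAGS_MANIFESTLOG comparison; keep_old and keep_new are hypothetical names used only for this illustration:

    # Old filter: skip non-revlog entries early, then compare the
    # revlog type flag against the store module's constant.
    def keep_old(entry):
        if not entry.is_revlog:
            return False
        return entry.revlog_type == store.FILEFLAGS_MANIFESTLOG

    # New filter: a single predicate on the entry itself, which is what
    # lets this module drop its 'store' import entirely.
    def keep_new(entry):
        return entry.is_revlog and entry.is_manifestlog

    # Under the assumption above, both select exactly the tree-manifest
    # revlog entries passed to repo.manifestlog.getstorage(entry.target_id).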