##// END OF EJS Templates
strip: do not include internal changesets in the strip backup...
marmoute -
r51211:f24c2e42 default
parent child Browse files
Show More
@@ -0,0 +1,236 b''
1 =====================================================
2 test behavior of the `internal` phase around bundling
3 =====================================================
4
5 Long story short, internal changesets are internal implementation details and
6 they should never leave the repository. Hence, they should never be in a
7 bundle.
8
9 Setup
10 =====
11
12 $ cat << EOF >> $HGRCPATH
13 > [ui]
14 > logtemplate="{node|short} [{phase}] {desc|firstline}"
15 > EOF
16
17
18 $ hg init reference-repo --config format.use-internal-phase=yes
19 $ cd reference-repo
20 $ echo a > a
21 $ hg add a
22 $ hg commit -m "a"
23 $ echo b > b
24 $ hg add b
25 $ hg commit -m "b"
26 $ echo b > c
27 $ hg add c
28 $ hg commit -m "c"
29 $ hg log -G
30 @ 07f0cc02c068 [draft] c
31 |
32 o d2ae7f538514 [draft] b
33 |
34 o cb9a9f314b8b [draft] a
35
36 $ hg up ".^"
37 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
38
39 do a shelve
40
41 $ touch a_file.txt
42 $ hg shelve -A
43 adding a_file.txt
44 shelved as default
45 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
46 $ hg log -G --hidden
47 o 2ec3cf310d86 [internal] changes to: b
48 |
49 | o 07f0cc02c068 [draft] c
50 |/
51 @ d2ae7f538514 [draft] b
52 |
53 o cb9a9f314b8b [draft] a
54
55 $ shelved_node=`hg log --rev tip --hidden -T '{node|short}'`
56
57 add more changeset above it
58
59 $ hg up 'desc(a)'
60 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 $ echo d > d
62 $ hg add d
63 $ hg commit -m "d"
64 created new head
65 $ echo d > e
66 $ hg add e
67 $ hg commit -m "e"
68 $ hg up null
69 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
70 $ hg log -G
71 o 636bc07920e3 [draft] e
72 |
73 o 980f7dc84c29 [draft] d
74 |
75 | o 07f0cc02c068 [draft] c
76 | |
77 | o d2ae7f538514 [draft] b
78 |/
79 o cb9a9f314b8b [draft] a
80
81 $ hg log -G --hidden
82 o 636bc07920e3 [draft] e
83 |
84 o 980f7dc84c29 [draft] d
85 |
86 | o 2ec3cf310d86 [internal] changes to: b
87 | |
88 | | o 07f0cc02c068 [draft] c
89 | |/
90 | o d2ae7f538514 [draft] b
91 |/
92 o cb9a9f314b8b [draft] a
93
94 $ cd ..
95
96 backup bundle from strip
97 ========================
98
99 strip an ancestor of the internal changeset
100 --------------------------------------------
101
102 $ cp -ar reference-repo strip-ancestor
103 $ cd strip-ancestor
104
105 The internal change is stripped, yet it should be skipped from the backup bundle.
106
107 $ hg log -G
108 o 636bc07920e3 [draft] e
109 |
110 o 980f7dc84c29 [draft] d
111 |
112 | o 07f0cc02c068 [draft] c
113 | |
114 | o d2ae7f538514 [draft] b
115 |/
116 o cb9a9f314b8b [draft] a
117
118 $ hg debugstrip 'desc(b)'
119 saved backup bundle to $TESTTMP/strip-ancestor/.hg/strip-backup/d2ae7f538514-59bd8bc3-backup.hg
120
121 The change should be either gone or hidden
122
123 $ hg log -G
124 o 636bc07920e3 [draft] e
125 |
126 o 980f7dc84c29 [draft] d
127 |
128 o cb9a9f314b8b [draft] a
129
130
131 The backup should not include it (as people tend to manipulate these directly)
132
133 $ ls -1 .hg/strip-backup/
134 d2ae7f538514-59bd8bc3-backup.hg
135 $ hg debugbundle .hg/strip-backup/*.hg
136 Stream params: {Compression: BZ}
137 changegroup -- {nbchanges: 2, version: 03} (mandatory: True)
138 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
139 07f0cc02c06869c81ebf33867edef30554020c0d
140 cache:rev-branch-cache -- {} (mandatory: False)
141 phase-heads -- {} (mandatory: True)
142 07f0cc02c06869c81ebf33867edef30554020c0d draft
143
144 Shelve should still work
145
146 $ hg unshelve
147 unshelving change 'default'
148 rebasing shelved changes
149 $ hg status
150 A a_file.txt
151
152 $ cd ..
153
154 strip an unrelated changeset with a lower revnum
155 ------------------------------------------------
156
157 $ cp -ar reference-repo strip-unrelated
158 $ cd strip-unrelated
159
160 The internal change is not directly stripped, but it is affected by the strip
161 and it is in the "temporary backup" zone. The zone that needs to be put in a
162 temporary bundle while we affect data under it.
163
164 $ hg debugstrip 'desc(c)'
165 saved backup bundle to $TESTTMP/strip-unrelated/.hg/strip-backup/07f0cc02c068-8fd0515f-backup.hg
166
167 The change should be either gone or hidden
168
169 $ hg log -G
170 o 636bc07920e3 [draft] e
171 |
172 o 980f7dc84c29 [draft] d
173 |
174 | o d2ae7f538514 [draft] b
175 |/
176 o cb9a9f314b8b [draft] a
177
178 The backup should not include it (as people tend to manipulate these directly)
179
180 $ ls -1 .hg/strip-backup/
181 07f0cc02c068-8fd0515f-backup.hg
182 $ hg debugbundle .hg/strip-backup/*.hg
183 Stream params: {Compression: BZ}
184 changegroup -- {nbchanges: 1, version: 03} (mandatory: True)
185 07f0cc02c06869c81ebf33867edef30554020c0d
186 cache:rev-branch-cache -- {} (mandatory: False)
187 phase-heads -- {} (mandatory: True)
188 07f0cc02c06869c81ebf33867edef30554020c0d draft
189
190 Shelve should still work
191
192 $ hg unshelve
193 unshelving change 'default'
194 rebasing shelved changes
195 $ hg status
196 A a_file.txt
197
198 $ cd ..
199
200 explicitly strip the internal changeset
201 ---------------------------------------
202
203 $ cp -ar reference-repo strip-explicit
204 $ cd strip-explicit
205
206 The internal change is directly selected for stripping.
207
208 $ hg debugstrip --hidden $shelved_node
209
210 The change should be gone
211
212 $ hg log -G --hidden
213 o 636bc07920e3 [draft] e
214 |
215 o 980f7dc84c29 [draft] d
216 |
217 | o 07f0cc02c068 [draft] c
218 | |
219 | o d2ae7f538514 [draft] b
220 |/
221 o cb9a9f314b8b [draft] a
222
223
224 We don't need to backup anything
225
226 $ ls -1 .hg/strip-backup/
227
228 Shelve should still work
229
230 $ hg unshelve
231 unshelving change 'default'
232 rebasing shelved changes
233 $ hg status
234 A a_file.txt
235
236 $ cd ..
@@ -1,573 +1,579 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Olivia Mackall
4 # Copyright 2007 Olivia Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import errno
10 import errno
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 short,
15 short,
16 )
16 )
17 from . import (
17 from . import (
18 bundle2,
18 bundle2,
19 changegroup,
19 changegroup,
20 discovery,
20 discovery,
21 error,
21 error,
22 exchange,
22 exchange,
23 obsolete,
23 obsolete,
24 obsutil,
24 obsutil,
25 pathutil,
25 pathutil,
26 phases,
26 phases,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 hashutil,
32 hashutil,
33 stringutil,
33 stringutil,
34 urlutil,
34 urlutil,
35 )
35 )
36
36
37
37
def backupbundle(
    repo, bases, heads, node, suffix, compress=True, obsolescence=True
):
    """create a bundle with the specified revisions as a backup

    The bundle is written into the repository's ``strip-backup`` directory
    under a name derived from ``short(node)``, a 4-byte hash of all bundled
    revisions, and ``suffix``. Returns the result of
    ``bundle2.writenewbundle`` (the created file name).
    """

    backupdir = b"strip-backup"
    vfs = repo.vfs
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    allcommits = repo.set(b'%ln::%ln', bases, heads)
    allhashes = sorted(c.hex() for c in allcommits)
    totalhash = hashutil.sha1(b''.join(allhashes)).digest()
    name = b"%s/%s-%s-%s.hg" % (
        backupdir,
        short(node),
        hex(totalhash[:4]),
        suffix,
    )

    # pick a bundle type matching the changegroup version the repo needs:
    # anything newer than '01' requires an HG20 (bundle2) container
    cgversion = changegroup.localversion(repo)
    comp = None
    if cgversion != b'01':
        bundletype = b"HG20"
        if compress:
            comp = b'BZ'
    elif compress:
        bundletype = b"HG10BZ"
    else:
        bundletype = b"HG10UN"

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    contentopts = {
        b'cg.version': cgversion,
        b'obsolescence': obsolescence,
        b'phases': True,
    }
    return bundle2.writenewbundle(
        repo.ui,
        repo,
        b'strip',
        name,
        bundletype,
        outgoing,
        contentopts,
        vfs,
        compression=comp,
    )
87
87
88
88
89 def _collectfiles(repo, striprev):
89 def _collectfiles(repo, striprev):
90 """find out the filelogs affected by the strip"""
90 """find out the filelogs affected by the strip"""
91 files = set()
91 files = set()
92
92
93 for x in range(striprev, len(repo)):
93 for x in range(striprev, len(repo)):
94 files.update(repo[x].files())
94 files.update(repo[x].files())
95
95
96 return sorted(files)
96 return sorted(files)
97
97
98
98
99 def _collectrevlog(revlog, striprev):
99 def _collectrevlog(revlog, striprev):
100 _, brokenset = revlog.getstrippoint(striprev)
100 _, brokenset = revlog.getstrippoint(striprev)
101 return [revlog.linkrev(r) for r in brokenset]
101 return [revlog.linkrev(r) for r in brokenset]
102
102
103
103
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()
    # gather every revlog the strip touches: all manifest revlogs first,
    # then the filelog of each affected file
    revlogs = list(manifestrevlogs(repo))
    revlogs.extend(repo.file(fname) for fname in files)
    for revlog in revlogs:
        broken.update(_collectrevlog(revlog, striprev))
    return broken
114
114
115
115
def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """remove changesets (and their descendants) from the repository

    ``nodelist`` may be a single node (bytes) or a list of nodes. Revisions
    above the lowest stripped revision that are *not* descendants of the
    stripped set are preserved via a temporary bundle that is re-applied
    after truncation. Returns the backup file path (or None when no backup
    was made) so extensions can use it.
    """
    # This function requires the caller to lock the repo, but it operates
    # within a transaction of its own, and thus requires there to be no current
    # transaction when it is called.
    if repo.currenttransaction() is not None:
        raise error.ProgrammingError(b'cannot strip from inside a transaction')

    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in [b'none', b'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()
    vfs = repo.vfs
    # load bookmark before changelog to avoid side effect from outdated
    # changelog (see repo._refreshchangelog)
    repo._bookmarks
    cl = repo.changelog

    # TODO handle undo of merge sets
    if isinstance(nodelist, bytes):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    saveheads = set(saverevs)
    for r in cl.revs(start=striprev + 1):
        if any(p in tostrip for p in cl.parentrevs(r)):
            # descendant of a stripped revision: strip it too
            tostrip.add(r)

        if r not in tostrip:
            # keep this revision; maintain the running set of save-heads
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    stripobsidx = obsmarkers = ()
    if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
        obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
    if obsmarkers:
        stripobsidx = [
            i for i, m in enumerate(repo.obsstore) if m in obsmarkers
        ]

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)

    backupfile = None
    node = nodelist[-1]
    if backup:
        backupfile = _createstripbackup(repo, stripbases, node, topic)
    # create a changegroup for all the branches we need to keep
    tmpbundlefile = None
    if saveheads:
        # do not compress temporary bundle if we remove it from disk later
        #
        # We do not include obsolescence, it might re-introduce prune markers
        # we are trying to strip. This is harmless since the stripped markers
        # are already backed up and we did not touched the markers for the
        # saved changesets.
        tmpbundlefile = backupbundle(
            repo,
            savebases,
            saveheads,
            node,
            b'temp',
            compress=False,
            obsolescence=False,
        )

    with ui.uninterruptible():
        try:
            with repo.transaction(b"strip") as tr:
                # TODO this code violates the interface abstraction of the
                # transaction and makes assumptions that file storage is
                # using append-only files. We'll need some kind of storage
                # API to handle stripping for us.
                oldfiles = set(tr._offsetmap.keys())
                oldfiles.update(tr._newfiles)

                tr.startgroup()
                cl.strip(striprev, tr)
                stripmanifest(repo, striprev, tr, files)

                for fn in files:
                    repo.file(fn).strip(striprev, tr)
                tr.endgroup()

                entries = tr.readjournal()

                # truncate every file the transaction appended to back to
                # its pre-transaction offset
                for file, troffset in entries:
                    if file in oldfiles:
                        continue
                    with repo.svfs(file, b'a', checkambig=True) as fp:
                        fp.truncate(troffset)
                    if troffset == 0:
                        repo.store.markremoved(file)

            deleteobsmarkers(repo.obsstore, stripobsidx)
            del repo.obsstore
            repo.invalidatevolatilesets()
            repo._phasecache.filterunknown(repo)

            if tmpbundlefile:
                # re-apply the saved (non-stripped) revisions
                ui.note(_(b"adding branch\n"))
                f = vfs.open(tmpbundlefile, b"rb")
                gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                # silence internal shuffling chatter
                maybe_silent = (
                    repo.ui.silent()
                    if not repo.ui.verbose
                    else util.nullcontextmanager()
                )
                with maybe_silent:
                    tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                    txnname = b'strip'
                    if not isinstance(gen, bundle2.unbundle20):
                        txnname = b"strip\n%s" % urlutil.hidepassword(
                            tmpbundleurl
                        )
                    with repo.transaction(txnname) as tr:
                        bundle2.applybundle(
                            repo, gen, tr, source=b'strip', url=tmpbundleurl
                        )
                f.close()

            with repo.transaction(b'repair') as tr:
                bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                repo._bookmarks.applychanges(repo, tr, bmchanges)

            # remove undo files
            for undovfs, undofile in repo.undofiles():
                try:
                    undovfs.unlink(undofile)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        ui.warn(
                            _(b'error removing %s: %s\n')
                            % (
                                undovfs.join(undofile),
                                stringutil.forcebytestr(e),
                            )
                        )

        except:  # re-raises
            # tell the user where the recovery bundles are before re-raising
            if backupfile:
                ui.warn(
                    _(b"strip failed, backup bundle stored in '%s'\n")
                    % vfs.join(backupfile)
                )
            if tmpbundlefile:
                ui.warn(
                    _(b"strip failed, unrecovered changes stored in '%s'\n")
                    % vfs.join(tmpbundlefile)
                )
                ui.warn(
                    _(
                        b"(fix the problem, then recover the changesets with "
                        b"\"hg unbundle '%s'\")\n"
                    )
                    % vfs.join(tmpbundlefile)
                )
            raise
        else:
            if tmpbundlefile:
                # Remove temporary bundle only if there were no exceptions
                vfs.unlink(tmpbundlefile)

    repo.destroyed()
    # return the backup file path (or None if 'backup' was False) so
    # extensions can use it
    return backupfile
306
306
307
307
def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
    """perform a "soft" strip using the archived phase

    Instead of removing the revisions, the given nodes and all their
    descendants are moved to the ``archived`` phase. A backup bundle is
    still written when ``backup`` is true; its path is returned (None when
    there is nothing to strip).
    """
    # the full set to archive: the given nodes plus all their descendants
    tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
    if not tostrip:
        return None

    backupfile = None
    if backup:
        node = tostrip[0]
        backupfile = _createstripbackup(repo, tostrip, node, topic)

    newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
    with repo.transaction(b'strip') as tr:
        # retract to the archived phase rather than physically stripping
        phases.retractboundary(repo, tr, phases.archived, tostrip)
        bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
        repo._bookmarks.applychanges(repo, tr, bmchanges)
    return backupfile
325
325
326
326
327 def _bookmarkmovements(repo, tostrip):
327 def _bookmarkmovements(repo, tostrip):
328 # compute necessary bookmark movement
328 # compute necessary bookmark movement
329 bm = repo._bookmarks
329 bm = repo._bookmarks
330 updatebm = []
330 updatebm = []
331 for m in bm:
331 for m in bm:
332 rev = repo[bm[m]].rev()
332 rev = repo[bm[m]].rev()
333 if rev in tostrip:
333 if rev in tostrip:
334 updatebm.append(m)
334 updatebm.append(m)
335 newbmtarget = None
335 newbmtarget = None
336 # If we need to move bookmarks, compute bookmark
336 # If we need to move bookmarks, compute bookmark
337 # targets. Otherwise we can skip doing this logic.
337 # targets. Otherwise we can skip doing this logic.
338 if updatebm:
338 if updatebm:
339 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
339 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
340 # but is much faster
340 # but is much faster
341 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
341 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
342 if newbmtarget:
342 if newbmtarget:
343 newbmtarget = repo[newbmtarget.first()].node()
343 newbmtarget = repo[newbmtarget.first()].node()
344 else:
344 else:
345 newbmtarget = b'.'
345 newbmtarget = b'.'
346 return newbmtarget, updatebm
346 return newbmtarget, updatebm
347
347
348
348
def _createstripbackup(repo, stripbases, node, topic):
    """back up the changesets we are about to strip into a bundle

    Returns the backup file path, or None when there is nothing to back up.
    """
    vfs = repo.vfs
    unfi = repo.unfiltered()
    to_node = unfi.changelog.node
    # Internal changesets are internal implementation details that should
    # never leave the repository and must not be exposed to users. In
    # addition, features using them are required to be resistant to strip.
    # See the test case for more details.
    all_backup = unfi.revs(
        b"(%ln)::(%ld) and not _internal()",
        stripbases,
        unfi.changelog.headrevs(),
    )
    if not all_backup:
        return None

    def to_nodes(revs):
        return [to_node(r) for r in revs]

    bases = to_nodes(unfi.revs("roots(%ld)", all_backup))
    heads = to_nodes(unfi.revs("heads(%ld)", all_backup))
    backupfile = backupbundle(repo, bases, heads, node, topic)
    repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
    repo.ui.log(
        b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
    )
    return backupfile
371
377
372
378
def safestriproots(ui, repo, nodes):
    """return list of roots of nodes where descendants are covered by nodes

    Requested nodes that have descendants outside ``nodes`` (which would be
    orphaned by the strip) are excluded, along with their ancestors, and a
    warning is printed for them.
    """
    torev = repo.unfiltered().changelog.rev
    revs = {torev(n) for n in nodes}
    # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
    # orphaned = affected - wanted
    # affected = descendants(roots(wanted))
    # wanted = revs
    # (the _phase(%s) clause keeps internal-phase changesets from counting
    # as orphans)
    revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
    tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
    notstrip = revs - tostrip
    if notstrip:
        nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
        ui.warn(
            _(b'warning: orphaned descendants detected, not stripping %s\n')
            % nodestr
        )
    return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
391
397
392
398
class stripcallback:
    """used as a transaction postclose callback

    Accumulates nodes via ``addnodes`` while the transaction is open, then
    strips them all at once when the transaction closes.
    """

    def __init__(self, ui, repo, backup, topic):
        self.ui = ui
        self.repo = repo
        self.backup = backup
        if not topic:
            topic = b'backup'
        self.topic = topic
        self.nodelist = []

    def addnodes(self, nodes):
        """queue more nodes to strip when the transaction closes"""
        for node in nodes:
            self.nodelist.append(node)

    def __call__(self, tr):
        safe_roots = safestriproots(self.ui, self.repo, self.nodelist)
        if safe_roots:
            strip(self.ui, self.repo, safe_roots, self.backup, self.topic)
410
416
411
417
def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
    """like strip, but works inside transaction and won't strip irrelevant revs

    nodelist must explicitly contain all descendants. Otherwise a warning will
    be printed that some nodes are not stripped.

    Will do a backup if `backup` is True. The last non-None "topic" will be
    used as the backup topic name. The default backup topic name is "backup".
    """
    tr = repo.currenttransaction()
    if not tr:
        # no transaction in flight: strip immediately
        nodes = safestriproots(ui, repo, nodelist)
        return strip(ui, repo, nodes, backup=backup, topic=topic)
    # transaction postclose callbacks are called in alphabet order.
    # use '\xff' as prefix so we are likely to be called last.
    callback = tr.getpostclose(b'\xffstrip')
    if callback is None:
        # first delayed strip in this transaction: register the callback
        callback = stripcallback(ui, repo, backup=backup, topic=topic)
        tr.addpostclose(b'\xffstrip', callback)
    if topic:
        callback.topic = topic
    callback.addnodes(nodelist)
434
440
435
441
def stripmanifest(repo, striprev, tr, files):
    """Strip revision `striprev` and later from every manifest revlog."""
    for mrevlog in manifestrevlogs(repo):
        mrevlog.strip(striprev, tr)
439
445
440
446
def manifestrevlogs(repo):
    """Yield every manifest revlog of `repo`: the root one first, then one
    per tree directory when treemanifest is in use."""
    yield repo.manifestlog.getstorage(b'')
    if not scmutil.istreemanifest(repo):
        # The scan below is safe without treemanifest, but pointless, so we
        # skip it in that case.
        return
    # Tree manifests live under meta/<dir>/00manifest.i in the store.
    for kind, unencoded, size in repo.store.datafiles():
        is_tree_index = unencoded.startswith(b'meta/') and unencoded.endswith(
            b'00manifest.i'
        )
        if is_tree_index:
            # Drop the b'meta/' prefix and the b'/00manifest.i' suffix.
            tree_dir = unencoded[5:-12]
            yield repo.manifestlog.getstorage(tree_dir)
452
458
453
459
def rebuildfncache(ui, repo, only_data=False):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.

    When `only_data` is true, the `.i` entries already listed in the fncache
    are trusted and only the matching `.d` entries are re-checked on disk,
    which avoids walking the whole changelog.
    """
    # Work on the unfiltered view so every revision is scanned.
    repo = repo.unfiltered()

    # Nothing to do for repos that don't use the fncache store format.
    if requirements.FNCACHE_REQUIREMENT not in repo.requirements:
        ui.warn(
            _(
                b'(not rebuilding fncache because repository does not '
                b'support fncache)\n'
            )
        )
        return

    with repo.lock():
        fnc = repo.store.fncache
        # Force a full load, reporting any parse problems to the user.
        fnc.ensureloaded(warn=ui.warn)

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        if only_data:
            # Trust the listing of .i from the fncache, but not the .d. This is
            # much faster, because we only need to stat every possible .d files,
            # instead of reading the full changelog
            for f in fnc:
                if f[:5] == b'data/' and f[-2:] == b'.i':
                    seenfiles.add(f[5:-2])
                    newentries.add(f)
                    dataf = f[:-2] + b'.d'
                    if repo.store._exists(dataf):
                        newentries.add(dataf)
        else:
            # Full rebuild: replay every changeset and record the store
            # files for each file it touched.
            progress = ui.makeprogress(
                _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
            )
            for rev in repo:
                progress.update(rev)

                ctx = repo[rev]
                for f in ctx.files():
                    # This is to minimize I/O.
                    if f in seenfiles:
                        continue
                    seenfiles.add(f)

                    i = b'data/%s.i' % f
                    d = b'data/%s.d' % f

                    if repo.store._exists(i):
                        newentries.add(i)
                    if repo.store._exists(d):
                        newentries.add(d)

            progress.complete()

        if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
            # This logic is safe if treemanifest isn't enabled, but also
            # pointless, so we skip it if treemanifest isn't enabled.
            for dir in pathutil.dirs(seenfiles):
                i = b'meta/%s/00manifest.i' % dir
                d = b'meta/%s/00manifest.d' % dir

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        # Report the delta, then persist only if something actually changed.
        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_(b'removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_(b'adding %s\n') % p)

        if addcount or removecount:
            ui.write(
                _(b'%d items added, %d removed from fncache\n')
                % (addcount, removecount)
            )
            # Replace the in-memory entries and mark the cache dirty so the
            # write below actually flushes it.
            fnc.entries = newentries
            fnc._dirty = True

            with repo.transaction(b'fncache') as tr:
                fnc.write(tr)
        else:
            ui.write(_(b'fncache already up to date\n'))
544
550
545
551
def deleteobsmarkers(obsstore, indices):
    """Delete some obsmarkers from obsstore and return how many were deleted

    'indices' is a list of ints which are the indices
    of the markers to be deleted.

    Every invocation of this function completely rewrites the obsstore file,
    skipping the markers we want to be removed. The new temporary file is
    created, remaining markers are written there and on .close() this file
    gets atomically renamed to obsstore, thus guaranteeing consistency."""
    if not indices:
        # we don't want to rewrite the obsstore with the same content
        return

    # 'indices' may be a large list; a set gives O(1) membership tests so the
    # filtering pass below stays linear in the number of markers.
    todelete = set(indices)
    left = []
    current = obsstore._all
    n = 0
    for i, m in enumerate(current):
        if i in todelete:
            n += 1
            continue
        left.append(m)

    # Re-encode the surviving markers into a temp file that atomically
    # replaces the obsstore on close.
    newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
    for bytes in obsolete.encodemarkers(left, True, obsstore._version):
        newobsstorefile.write(bytes)
    newobsstorefile.close()
    return n
General Comments 0
You need to be logged in to leave comments. Login now