repair: use node to track post-strip bookmark target...
Matt Mackall
r17796:1b51638b default
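Editor's note: the patch below changes a single line. The fallback bookmark target computed before the strip is now recorded as a changeset node instead of a revision number. A revision number is only an ordinal position in the changelog; the strip truncates the revlogs and may re-add saved changesets afterwards, so a pre-strip revision number can end up naming a different changeset, while the 20-byte node of a surviving changeset stays valid. A minimal illustration of the distinction, assuming only a loaded localrepository object `repo` (the names here are illustrative, not part of the patch):

    # illustrative only -- `repo` is any Mercurial localrepository
    rev = len(repo) - 1          # a revision number: just an index into the changelog
    node = repo[rev].node()      # the 20-byte binary identifier of that changeset
    # after a strip re-adds the saved changesets, `rev` may name a different
    # changeset, but `repo[node]` (for a surviving changeset) still names the same one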
@@ -1,200 +1,200 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from mercurial import changegroup, bookmarks
 from mercurial.node import short
 from mercurial.i18n import _
 import os
 import errno
 
 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cg = repo.changegroupsubset(bases, heads, 'strip')
     backupdir = repo.join("strip-backup")
     if not os.path.isdir(backupdir):
         os.mkdir(backupdir)
     name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
     if compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(cg, name, bundletype)
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         linkgen = (revlog.linkrev(i) for i in revlog)
         # find the truncation point of the revlog
         for lrev in linkgen:
             if lrev >= striprev:
                 break
         # see if any revision after this point has a linkrev
         # less than striprev (those will be broken by strip)
         for lrev in linkgen:
             if lrev < striprev:
                 s.add(lrev)
 
     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))
 
     return s
 
 def strip(ui, repo, nodelist, backup="all", topic='backup'):
     # It simplifies the logic around updating the branchheads cache if we only
     # have to consider the effect of the stripped revisions and not revisions
     # missing because the cache is out-of-date.
     repo.updatebranchcache()
 
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     # Generate set of branches who will have nodes stripped.
     striprevs = repo.revs("%ld::", striplist)
     stripbranches = set([repo[rev].branch() for rev in striprevs])
 
     # Set of potential new heads resulting from the strip. The parents of any
     # node removed could be a new head because the node to be removed could have
     # been the only child of the parent.
     newheadrevs = repo.revs("parents(%ld::) - %ld::", striprevs, striprevs)
     newheadnodes = set([cl.node(rev) for rev in newheadrevs])
     newheadbranches = set([repo[rev].branch() for rev in newheadrevs])
 
     keeppartialbundle = backup == 'strip'
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
     newbmtarget = repo.revs('sort(heads((::%ld) - (%ld)), -rev)',
                             tostrip, tostrip)
     if newbmtarget:
-        newbmtarget = newbmtarget[0]
+        newbmtarget = repo[newbmtarget[0]].node()
     else:
         newbmtarget = '.'
 
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
     if backup == "all":
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=keeppartialbundle)
 
     mfst = repo.manifest
 
     tr = repo.transaction("strip")
     offset = len(tr.entries)
 
     try:
         tr.startgroup()
         cl.strip(striprev, tr)
         mfst.strip(striprev, tr)
         for fn in files:
             repo.file(fn).strip(striprev, tr)
         tr.endgroup()
 
         try:
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.sopener(file, 'a').truncate(troffset)
             tr.close()
         except: # re-raises
             tr.abort()
             raise
 
         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = open(chgrpfile, "rb")
             gen = changegroup.readbundle(f, chgrpfile)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
             if not keeppartialbundle:
                 os.unlink(chgrpfile)
 
         # remove undo files
         for undofile in repo.undofiles():
             try:
                 os.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         bookmarks.write(repo)
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % backupfile)
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % chgrpfile)
         raise
 
     if len(stripbranches) == 1 and len(newheadbranches) == 1 \
        and stripbranches == newheadbranches:
         repo.destroyed(newheadnodes)
     else:
         # Multiple branches involved in strip. Will allow branchcache to become
         # invalid and later on rebuilt from scratch
         repo.destroyed()
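For context, strip() is a library function; at this point in Mercurial's history it is driven by extensions such as mq's `hg strip` command rather than called directly by users. A minimal caller sketch, assuming only the signature shown in the diff above (the helper name and its setup are hypothetical):

    # hypothetical caller -- assumes only strip(ui, repo, nodelist, backup="all", topic='backup')
    from mercurial import repair

    def remove_with_descendants(ui, repo, rev):
        # resolve to a node up front; revision numbers may be remapped by the strip
        node = repo[rev].node()
        # backup="all" writes a full bundle to .hg/strip-backup/ before truncating,
        # so the stripped changesets can later be restored by unbundling that file
        repair.strip(ui, repo, node, backup="all", topic='backup')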