repair: fix missing import...
Alain Leufroy
r16440:692bf06b default
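The change itself is a single line: at the end of strip(), undo files are deleted and a missing file is meant to be ignored via the `e.errno != errno.ENOENT` check, but the module never imported errno, so any OSError in that handler escalated to a NameError. Adding `import errno` restores the intended behaviour. A minimal sketch of the same pattern outside Mercurial (the helper name `removeundo` and the modern `except ... as` spelling are illustrative; the file below uses the Python 2 form `except OSError, e:`):

import errno
import os

def removeundo(path):
    # Delete a file, treating "already gone" (ENOENT) as success and
    # reporting any other OS-level failure.
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            print('error removing %s: %s' % (path, e))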
@@ -1,175 +1,176 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from mercurial import changegroup, bookmarks, phases
 from mercurial.node import short
 from mercurial.i18n import _
 import os
+import errno

 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     cg = repo.changegroupsubset(bases, heads, 'strip')
     backupdir = repo.join("strip-backup")
     if not os.path.isdir(backupdir):
         os.mkdir(backupdir)
     name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
     if compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(cg, name, bundletype)

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         links = (revlog.linkrev(i) for i in revlog)
         # find the truncation point of the revlog
         for lrev in links:
             if lrev >= striprev:
                 break
         # see if any revision after this point has a linkrev
         # less than striprev (those will be broken by strip)
         for lrev in links:
             if lrev < striprev:
                 s.add(lrev)

     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))

     return s

 def strip(ui, repo, nodelist, backup="all", topic='backup'):
     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     keeppartialbundle = backup == 'strip'

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants(rev):
             tostrip.add(desc)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(*saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)

     # create a changegroup for all the branches we need to keep
     backupfile = None
     if backup == "all":
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=keeppartialbundle)

     mfst = repo.manifest

     tr = repo.transaction("strip")
     offset = len(tr.entries)

     try:
         tr.startgroup()
         cl.strip(striprev, tr)
         mfst.strip(striprev, tr)
         for fn in files:
             repo.file(fn).strip(striprev, tr)
         tr.endgroup()

         try:
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.sopener(file, 'a').truncate(troffset)
             tr.close()
         except:
             tr.abort()
             raise

         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = open(chgrpfile, "rb")
             gen = changegroup.readbundle(f, chgrpfile)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
             if not keeppartialbundle:
                 os.unlink(chgrpfile)

         # remove undo files
         for undofile in repo.undofiles():
             try:
                 os.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))

         for m in updatebm:
             bm[m] = repo['.'].node()
         bookmarks.write(repo)
     except:
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % backupfile)
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % chgrpfile)
         raise

     repo.destroyed()

     # remove potential unknown phase
     # XXX using to_strip data would be faster
     phases.filterunknown(repo)
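For context, strip() is the backend used by the strip command (provided by the mq extension in this era) and accepts either a single binary node or a list of them. A hypothetical extension-level caller, purely as a usage sketch (the name `stripone` is invented; `ui` and `repo` come from the calling command):

from mercurial import repair

def stripone(ui, repo, rev, backup="all"):
    # Resolve the revision to its binary node and delegate to repair.strip;
    # a plain string node is accepted and treated as a one-element list.
    node = repo[rev].node()
    repair.strip(ui, repo, node, backup=backup)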