repair: avoid string concatenation by + operator...
FUJIWARA Katsunori
r24863:f3558829 stable
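The change below touches a single line in _bundle(): the '+' between the two string literals passed to _() is dropped, leaving adjacent literals that the Python parser joins at compile time. The likely motivation (the commit message is truncated, so this is inferred) is that gettext-style extraction tools recognize adjacent literals as one translatable message, while a runtime '+' can hide the second fragment from them. A minimal sketch, reusing the strings from the changed line:

# Sketch only: both forms produce the same string at runtime; the difference
# matters to source-scanning tools such as translation extractors.

# Adjacent literals are merged by the parser, so the whole message is a
# single literal that extractors can pick up.
msg_implicit = ('unknown strip-bundle2-version value %r; '
                'should be one of %r\n')

# A runtime '+' builds the same string, but extractors that only look at
# literals may record just the first fragment, breaking catalog lookup.
msg_plus = ('unknown strip-bundle2-version value %r; ' +
            'should be one of %r\n')

assert msg_implicit == msg_plus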
@@ -1,225 +1,225 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from mercurial import changegroup, exchange, util, bundle2
 from mercurial.node import short, hex
 from mercurial.i18n import _
 import errno

 def _bundle(repo, bases, heads, node, suffix, compress=True):
     """create a bundle with the specified revisions as a backup"""
     usebundle2 = (repo.ui.config('experimental', 'bundle2-exp') and
                   repo.ui.config('experimental', 'strip-bundle2-version'))
     if usebundle2:
         cgversion = repo.ui.config('experimental', 'strip-bundle2-version')
         if cgversion not in changegroup.packermap:
-            repo.ui.warn(_('unknown strip-bundle2-version value %r; ' +
+            repo.ui.warn(_('unknown strip-bundle2-version value %r; '
                            'should be one of %r\n') %
                          (cgversion, sorted(changegroup.packermap.keys()),))
             cgversion = '01'
             usebundle2 = False
     else:
         cgversion = '01'

     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                        version=cgversion)
     backupdir = "strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)

     # Include a hash of all the nodes in the filename for uniqueness
     hexbases = (hex(n) for n in bases)
     hexheads = (hex(n) for n in heads)
     allcommits = repo.set('%ls::%ls', hexbases, hexheads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = util.sha1(''.join(allhashes)).hexdigest()
     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

     if usebundle2:
         bundletype = "HG20"
     elif compress:
         bundletype = "HG10BZ"
     else:
         bundletype = "HG10UN"
     return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs)

 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

     for x in xrange(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)

 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
     def collectone(revlog):
         _, brokenset = revlog.getstrippoint(striprev)
         s.update([revlog.linkrev(r) for r in brokenset])

     collectone(repo.manifest)
     for fname in files:
         collectone(repo.file(fname))

     return s

 def strip(ui, repo, nodelist, backup=True, topic='backup'):

     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in ['none', 'strip']:
         backup = False

     repo = repo.unfiltered()
     repo.destroying()

     cl = repo.changelog
     # TODO handle undo of merge sets
     if isinstance(nodelist, str):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)

     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     for rev in striplist:
         for desc in cl.descendants([rev]):
             tostrip.add(desc)

     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)

     # compute heads
     saveheads = set(saverevs)
     for r in xrange(striprev + 1, len(cl)):
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]

     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]

     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
     # is much faster
     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
     if newbmtarget:
         newbmtarget = repo[newbmtarget.first()].node()
     else:
         newbmtarget = '.'

     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)

     # create a changegroup for all the branches we need to keep
     backupfile = None
     vfs = repo.vfs
     node = nodelist[-1]
     if backup:
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
         repo.ui.status(_("saved backup bundle to %s\n") %
                        vfs.join(backupfile))
         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                     vfs.join(backupfile))
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                             compress=False)

     mfst = repo.manifest

     tr = repo.transaction("strip")
     offset = len(tr.entries)

     try:
         tr.startgroup()
         cl.strip(striprev, tr)
         mfst.strip(striprev, tr)
         for fn in files:
             repo.file(fn).strip(striprev, tr)
         tr.endgroup()

         try:
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.svfs(file, 'a').truncate(troffset)
                 if troffset == 0:
                     repo.store.markremoved(file)
             tr.close()
         except: # re-raises
             tr.abort()
             raise

         if saveheads or savebases:
             ui.note(_("adding branch\n"))
             f = vfs.open(chgrpfile, "rb")
             gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
             if isinstance(gen, bundle2.unbundle20):
                 tr = repo.transaction('strip')
                 tr.hookargs = {'source': 'strip',
                                'url': 'bundle:' + vfs.join(chgrpfile)}
                 try:
                     bundle2.processbundle(repo, gen, lambda: tr)
                     tr.close()
                 finally:
                     tr.release()
             else:
                 changegroup.addchangegroup(repo, gen, 'strip',
                                            'bundle:' + vfs.join(chgrpfile),
                                            True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()

         # remove undo files
         for undovfs, undofile in repo.undofiles():
             try:
                 undovfs.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
                     ui.warn(_('error removing %s: %s\n') %
                             (undovfs.join(undofile), str(e)))

         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
         bm.write()
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
                     % vfs.join(backupfile))
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                     % vfs.join(chgrpfile))
         raise
     else:
         if saveheads or savebases:
             # Remove partial backup only if there were no exceptions
             vfs.unlink(chgrpfile)

     repo.destroyed()
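A note on the bookmark retargeting step above: the inline comment states that, for a set s, max(parents(s) - s) is the same as max(heads(::s - s)) but much faster, since it only looks one generation above the stripped revisions instead of walking every ancestor. A small self-contained sketch on a toy DAG (hypothetical revision numbers, not tied to any real repository) shows the two expressions agreeing on the value that newbmtarget would take:

# Toy DAG for illustration: rev -> list of parent revs. Revision numbers
# increase topologically, mirroring changelog numbering.
parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3], 5: [4]}

def ancestors(revs):
    """All ancestors of revs, including revs themselves (like '::s')."""
    seen = set()
    stack = list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents[r])
    return seen

def heads(revs):
    """Members of revs that have no child inside revs."""
    revs = set(revs)
    parented = {p for r in revs for p in parents[r] if p in revs}
    return revs - parented

tostrip = {3, 4, 5}   # the revisions being stripped

# max(parents(s) - s): only one generation up from the stripped set.
via_parents = max({p for r in tostrip for p in parents[r]} - tostrip)

# max(heads(::s - s)): walk all ancestors, drop the stripped set, take heads.
via_heads = max(heads(ancestors(tostrip) - tostrip))

assert via_parents == via_heads == 2   # both pick the same bookmark target

In strip() this value is only consulted when a bookmark pointed into the stripped set and therefore needs a new target.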