@@ -1,362 +1,360 @@
1 | 1 | # repair.py - functions for repository repair for mercurial
2 | 2 | #
3 | 3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 | 4 | # Copyright 2007 Matt Mackall
5 | 5 | #
6 | 6 | # This software may be used and distributed according to the terms of the
7 | 7 | # GNU General Public License version 2 or any later version.
8 | 8 |
9 | 9 | from __future__ import absolute_import
10 | 10 |
11 | 11 | import errno
12 | 12 | import hashlib
13 | 13 |
14 | 14 | from .i18n import _
15 | 15 | from .node import short
16 | 16 | from . import (
17 | 17 |     bundle2,
18 | 18 |     changegroup,
19 | 19 |     error,
20 | 20 |     exchange,
21 | 21 |     obsolete,
22 | 22 |     util,
23 | 23 | )
24 | 24 |
25 | 25 | def _bundle(repo, bases, heads, node, suffix, compress=True):
26 | 26 |     """create a bundle with the specified revisions as a backup"""
27 | 27 |     cgversion = changegroup.safeversion(repo)
28 | 28 |
29 | 29 |     cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
30 | 30 |                                        version=cgversion)
31 | 31 |     backupdir = "strip-backup"
32 | 32 |     vfs = repo.vfs
33 | 33 |     if not vfs.isdir(backupdir):
34 | 34 |         vfs.mkdir(backupdir)
35 | 35 |
36 | 36 |     # Include a hash of all the nodes in the filename for uniqueness
37 | 37 |     allcommits = repo.set('%ln::%ln', bases, heads)
38 | 38 |     allhashes = sorted(c.hex() for c in allcommits)
39 | 39 |     totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
40 | 40 |     name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
41 | 41 |
42 | 42 |     comp = None
43 | 43 |     if cgversion != '01':
44 | 44 |         bundletype = "HG20"
45 | 45 |         if compress:
46 | 46 |             comp = 'BZ'
47 | 47 |     elif compress:
48 | 48 |         bundletype = "HG10BZ"
49 | 49 |     else:
50 | 50 |         bundletype = "HG10UN"
51 | 51 |     return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
52 | 52 |                                compression=comp)
53 | 53 |
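
Aside: the backup name is a pure function of the stripped set, so stripping the same revisions again reuses the same file. A small self-contained sketch of the derivation (the helper name and the sample values are invented; the hashing and format string mirror the code above):

    import hashlib

    def backupname(shortnode, allhashes, suffix):
        # Illustrative helper, not part of repair.py. Hash the sorted hex
        # nodes so the name is stable for a given set of revisions, then
        # keep the first 8 characters, as the code above does.
        joined = ''.join(sorted(allhashes)).encode('ascii')
        totalhash = hashlib.sha1(joined).hexdigest()
        return "strip-backup/%s-%s-%s.hg" % (shortnode, totalhash[:8], suffix)

    print(backupname('1234567890ab', ['aa' * 20, 'bb' * 20], 'backup'))
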
54 | 54 | def _collectfiles(repo, striprev):
55 | 55 |     """find out the filelogs affected by the strip"""
56 | 56 |     files = set()
57 | 57 |
58 | 58 |     for x in xrange(striprev, len(repo)):
59 | 59 |         files.update(repo[x].files())
60 | 60 |
61 | 61 |     return sorted(files)
62 | 62 |
63 | 63 | def _collectbrokencsets(repo, files, striprev):
64 | 64 |     """return the changesets which will be broken by the truncation"""
65 | 65 |     s = set()
66 | 66 |     def collectone(revlog):
67 | 67 |         _, brokenset = revlog.getstrippoint(striprev)
68 | 68 |         s.update([revlog.linkrev(r) for r in brokenset])
69 | 69 |
70 | 70 |     collectone(repo.manifestlog._revlog)
71 | 71 |     for fname in files:
72 | 72 |         collectone(repo.file(fname))
73 | 73 |
74 | 74 |     return s
75 | 75 |
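
Aside: this helper leans on two revlog properties. Every filelog or manifest revision stores a linkrev back to the changelog revision that introduced it, and truncation is positional, so a revision stored after the strip point is lost even when its linkrev is below striprev; that is what "breaks" a surviving changeset. A toy model of those semantics (not the real revlog API beyond the two methods called above, and this getstrippoint body is a simplification):

    class toyrevlog(object):
        """Toy stand-in exposing only the two methods used above."""
        def __init__(self, linkrevs):
            self._linkrevs = linkrevs      # linkrev of each local revision

        def linkrev(self, r):
            return self._linkrevs[r]

        def getstrippoint(self, minlink):
            # Everything from the first revision linked at or above
            # minlink gets truncated...
            cands = [r for r, l in enumerate(self._linkrevs) if l >= minlink]
            strippoint = cands[0] if cands else len(self._linkrevs)
            # ...which also breaks later revisions linked below minlink.
            broken = set(r for r in range(strippoint, len(self._linkrevs))
                         if self._linkrevs[r] < minlink)
            return strippoint, broken

    rl = toyrevlog([0, 5, 2, 7])           # out-of-order linkrevs
    point, broken = rl.getstrippoint(5)
    print("%d %r" % (point, sorted(rl.linkrev(r) for r in broken)))  # 1 [2]

Here changeset 2 would survive a strip at 5 but lose file data, so its rev lands in saverevs and it gets restored from the temporary bundle.
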
76 | 76 | def strip(ui, repo, nodelist, backup=True, topic='backup'):
77 | 77 |     # This function operates within a transaction of its own, but does
78 | 78 |     # not take any lock on the repo.
79 | 79 |     # Simple way to maintain backwards compatibility for this
80 | 80 |     # argument.
81 | 81 |     if backup in ['none', 'strip']:
82 | 82 |         backup = False
83 | 83 |
84 | 84 |     repo = repo.unfiltered()
85 | 85 |     repo.destroying()
86 | 86 |
87 | 87 |     cl = repo.changelog
88 | 88 |     # TODO handle undo of merge sets
89 | 89 |     if isinstance(nodelist, str):
90 | 90 |         nodelist = [nodelist]
91 | 91 |     striplist = [cl.rev(node) for node in nodelist]
92 | 92 |     striprev = min(striplist)
93 | 93 |
    | 94 |     files = _collectfiles(repo, striprev)
    | 95 |     saverevs = _collectbrokencsets(repo, files, striprev)
    | 96 |
94 | 97 |     # Some revisions with rev > striprev may not be descendants of striprev.
95 | 98 |     # We have to find these revisions and put them in a bundle, so that
96 | 99 |     # we can restore them after the truncations.
97 | 100 |     # To create the bundle we use repo.changegroupsubset which requires
98 | 101 |     # the list of heads and bases of the set of interesting revisions.
99 | 102 |     # (head = revision in the set that has no descendant in the set;
100 | 103 |     #  base = revision in the set that has no ancestor in the set)
101 | 104 |     tostrip = set(striplist)
    | 105 |     saveheads = set(saverevs)
102 | 106 |     for r in cl.revs(start=striprev + 1):
103 | 107 |         if any(p in tostrip for p in cl.parentrevs(r)):
104 | 108 |             tostrip.add(r)
105 | 109 |
106 |     |     files = _collectfiles(repo, striprev)
107 |     |     saverevs = _collectbrokencsets(repo, files, striprev)
108 |     |
109 |     |     # compute heads
110 |     |     saveheads = set(saverevs)
111 |     |     for r in xrange(striprev + 1, len(cl)):
112 | 110 |         if r not in tostrip:
113 | 111 |             saverevs.add(r)
114 | 112 |             saveheads.difference_update(cl.parentrevs(r))
115 | 113 |             saveheads.add(r)
116 | 114 |     saveheads = [cl.node(r) for r in saveheads]
117 | 115 |
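
Note on this hunk: the only change is a hoist. _collectfiles and _collectbrokencsets now run before the loop, and the old separate xrange pass is folded into the single cl.revs walk. One forward pass suffices because revlog parents always have lower revision numbers, so each revision's tostrip membership is settled before the revision itself is visited. The head/base definitions in the comment are easy to check on a toy DAG (plain parent tuples, with -1 standing in for nullrev; all names here are illustrative):

    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (3, -1)}
    saveset = set([2, 3, 4])

    def ancestors(r):
        # Collect all ancestors of r by walking parent pointers.
        seen, stack = set(), [p for p in parents[r] if p != -1]
        while stack:
            a = stack.pop()
            if a not in seen:
                seen.add(a)
                stack.extend(p for p in parents[a] if p != -1)
        return seen

    # base = revision in the set that has no ancestor in the set
    bases = set(r for r in saveset if not (ancestors(r) & saveset))

    # head = revision in the set that has no descendant in the set; a
    # single rev-order walk discarding parents, exactly as the loop above
    heads = set()
    for r in sorted(saveset):
        heads.difference_update(parents[r])
        heads.add(r)

    print("%r %r" % (sorted(bases), sorted(heads)))   # [2, 3] [2, 4]
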
118 | 116 |     # compute base nodes
119 | 117 |     if saverevs:
120 | 118 |         descendants = set(cl.descendants(saverevs))
121 | 119 |         saverevs.difference_update(descendants)
122 | 120 |     savebases = [cl.node(r) for r in saverevs]
123 | 121 |     stripbases = [cl.node(r) for r in tostrip]
124 | 122 |
125 | 123 |     # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
126 | 124 |     # is much faster
127 | 125 |     newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
128 | 126 |     if newbmtarget:
129 | 127 |         newbmtarget = repo[newbmtarget.first()].node()
130 | 128 |     else:
131 | 129 |         newbmtarget = '.'
132 | 130 |
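
Aside: the claimed identity, max(parents(s) - s) == max(heads(::s - s)) for a set s, can be brute-forced on the same toy representation as the sketch above (a quick check, not a proof):

    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (3, -1)}

    def ancestors(r, acc=None):
        acc = set() if acc is None else acc
        for p in parents[r]:
            if p != -1 and p not in acc:
                acc.add(p)
                ancestors(p, acc)
        return acc

    s = set([3, 4])                        # the set being stripped
    lhs = max(set(p for r in s for p in parents[r] if p != -1) - s)

    below = set().union(*[ancestors(r) for r in s]) - s    # ::s - s
    rhs = max(r for r in below
              if not any(r in parents[c] for c in below))  # heads(::s - s)
    assert lhs == rhs == 1
    print(lhs)
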
133 | 131 |     bm = repo._bookmarks
134 | 132 |     updatebm = []
135 | 133 |     for m in bm:
136 | 134 |         rev = repo[bm[m]].rev()
137 | 135 |         if rev in tostrip:
138 | 136 |             updatebm.append(m)
139 | 137 |
140 | 138 |     # create a changegroup for all the branches we need to keep
141 | 139 |     backupfile = None
142 | 140 |     vfs = repo.vfs
143 | 141 |     node = nodelist[-1]
144 | 142 |     if backup:
145 | 143 |         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
146 | 144 |         repo.ui.status(_("saved backup bundle to %s\n") %
147 | 145 |                        vfs.join(backupfile))
148 | 146 |         repo.ui.log("backupbundle", "saved backup bundle to %s\n",
149 | 147 |                     vfs.join(backupfile))
150 | 148 |     tmpbundlefile = None
151 | 149 |     if saveheads:
152 | 150 |         # do not compress temporary bundle if we remove it from disk later
153 | 151 |         tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
154 | 152 |                                 compress=False)
155 | 153 |
156 | 154 |     mfst = repo.manifestlog._revlog
157 | 155 |
158 | 156 |     curtr = repo.currenttransaction()
159 | 157 |     if curtr is not None:
160 | 158 |         del curtr # avoid carrying reference to transaction for nothing
161 | 159 |         msg = _('programming error: cannot strip from inside a transaction')
162 | 160 |         raise error.Abort(msg, hint=_('contact your extension maintainer'))
163 | 161 |
164 | 162 |     try:
165 | 163 |         with repo.transaction("strip") as tr:
166 | 164 |             offset = len(tr.entries)
167 | 165 |
168 | 166 |             tr.startgroup()
169 | 167 |             cl.strip(striprev, tr)
170 | 168 |             mfst.strip(striprev, tr)
171 | 169 |             if 'treemanifest' in repo.requirements: # safe but unnecessary
172 | 170 |                                                     # otherwise
173 | 171 |                 for unencoded, encoded, size in repo.store.datafiles():
174 | 172 |                     if (unencoded.startswith('meta/') and
175 | 173 |                         unencoded.endswith('00manifest.i')):
176 | 174 |                         dir = unencoded[5:-12]
177 | 175 |                         repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
178 | 176 |             for fn in files:
179 | 177 |                 repo.file(fn).strip(striprev, tr)
180 | 178 |             tr.endgroup()
181 | 179 |
182 | 180 |             for i in xrange(offset, len(tr.entries)):
183 | 181 |                 file, troffset, ignore = tr.entries[i]
184 | 182 |                 with repo.svfs(file, 'a', checkambig=True) as fp:
185 | 183 |                     fp.truncate(troffset)
186 | 184 |                 if troffset == 0:
187 | 185 |                     repo.store.markremoved(file)
188 | 186 |
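
Aside: note that the revlogs' strip() calls above only record (file, offset) pairs in the transaction; this xrange(offset, ...) loop performs the actual on-disk truncation, and an offset of 0 means the whole file is gone, hence markremoved. The truncate-in-append-mode trick in miniature (a tempfile stands in for a revlog; vfs and checkambig are elided):

    import os, tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'ab') as fp:
        fp.write(b'revisions to keep')
    troffset = os.path.getsize(path)      # offset recorded in the transaction
    with open(path, 'ab') as fp:
        fp.write(b'revisions being stripped')
    with open(path, 'ab') as fp:          # append mode, then truncate, as above
        fp.truncate(troffset)
    assert open(path, 'rb').read() == b'revisions to keep'
    os.unlink(path)
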
189 | 187 |         if tmpbundlefile:
190 | 188 |             ui.note(_("adding branch\n"))
191 | 189 |             f = vfs.open(tmpbundlefile, "rb")
192 | 190 |             gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
193 | 191 |             if not repo.ui.verbose:
194 | 192 |                 # silence internal shuffling chatter
195 | 193 |                 repo.ui.pushbuffer()
196 | 194 |             if isinstance(gen, bundle2.unbundle20):
197 | 195 |                 with repo.transaction('strip') as tr:
198 | 196 |                     tr.hookargs = {'source': 'strip',
199 | 197 |                                    'url': 'bundle:' + vfs.join(tmpbundlefile)}
200 | 198 |                     bundle2.applybundle(repo, gen, tr, source='strip',
201 | 199 |                                         url='bundle:' + vfs.join(tmpbundlefile))
202 | 200 |             else:
203 | 201 |                 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
204 | 202 |                           True)
205 | 203 |             if not repo.ui.verbose:
206 | 204 |                 repo.ui.popbuffer()
207 | 205 |             f.close()
208 | 206 |         repo._phasecache.invalidate()
209 | 207 |
210 | 208 |         for m in updatebm:
211 | 209 |             bm[m] = repo[newbmtarget].node()
212 | 210 |         lock = tr = None
213 | 211 |         try:
214 | 212 |             lock = repo.lock()
215 | 213 |             tr = repo.transaction('repair')
216 | 214 |             bm.recordchange(tr)
217 | 215 |             tr.close()
218 | 216 |         finally:
219 | 217 |             tr.release()
220 | 218 |             lock.release()
221 | 219 |
222 | 220 |         # remove undo files
223 | 221 |         for undovfs, undofile in repo.undofiles():
224 | 222 |             try:
225 | 223 |                 undovfs.unlink(undofile)
226 | 224 |             except OSError as e:
227 | 225 |                 if e.errno != errno.ENOENT:
228 | 226 |                     ui.warn(_('error removing %s: %s\n') %
229 | 227 |                             (undovfs.join(undofile), str(e)))
230 | 228 |
231 | 229 |     except: # re-raises
232 | 230 |         if backupfile:
233 | 231 |             ui.warn(_("strip failed, backup bundle stored in '%s'\n")
234 | 232 |                     % vfs.join(backupfile))
235 | 233 |         if tmpbundlefile:
236 | 234 |             ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
237 | 235 |                     % vfs.join(tmpbundlefile))
238 | 236 |             ui.warn(_("(fix the problem, then recover the changesets with "
239 | 237 |                       "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
240 | 238 |         raise
241 | 239 |     else:
242 | 240 |         if tmpbundlefile:
243 | 241 |             # Remove temporary bundle only if there were no exceptions
244 | 242 |             vfs.unlink(tmpbundlefile)
245 | 243 |
246 | 244 |     repo.destroyed()
247 | 245 |     # return the backup file path (or None if 'backup' was False) so
248 | 246 |     # extensions can use it
249 | 247 |     return backupfile
250 | 248 |
251 | 249 | def rebuildfncache(ui, repo):
252 | 250 |     """Rebuilds the fncache file from repo history.
253 | 251 |
254 | 252 |     Missing entries will be added. Extra entries will be removed.
255 | 253 |     """
256 | 254 |     repo = repo.unfiltered()
257 | 255 |
258 | 256 |     if 'fncache' not in repo.requirements:
259 | 257 |         ui.warn(_('(not rebuilding fncache because repository does not '
260 | 258 |                   'support fncache)\n'))
261 | 259 |         return
262 | 260 |
263 | 261 |     with repo.lock():
264 | 262 |         fnc = repo.store.fncache
265 | 263 |         # Trigger load of fncache.
266 | 264 |         if 'irrelevant' in fnc:
267 | 265 |             pass
268 | 266 |
269 | 267 |         oldentries = set(fnc.entries)
270 | 268 |         newentries = set()
271 | 269 |         seenfiles = set()
272 | 270 |
273 | 271 |         repolen = len(repo)
274 | 272 |         for rev in repo:
275 | 273 |             ui.progress(_('rebuilding'), rev, total=repolen,
276 | 274 |                         unit=_('changesets'))
277 | 275 |
278 | 276 |             ctx = repo[rev]
279 | 277 |             for f in ctx.files():
280 | 278 |                 # This is to minimize I/O.
281 | 279 |                 if f in seenfiles:
282 | 280 |                     continue
283 | 281 |                 seenfiles.add(f)
284 | 282 |
285 | 283 |                 i = 'data/%s.i' % f
286 | 284 |                 d = 'data/%s.d' % f
287 | 285 |
288 | 286 |                 if repo.store._exists(i):
289 | 287 |                     newentries.add(i)
290 | 288 |                 if repo.store._exists(d):
291 | 289 |                     newentries.add(d)
292 | 290 |
293 | 291 |         ui.progress(_('rebuilding'), None)
294 | 292 |
295 | 293 |         if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
296 | 294 |             for dir in util.dirs(seenfiles):
297 | 295 |                 i = 'meta/%s/00manifest.i' % dir
298 | 296 |                 d = 'meta/%s/00manifest.d' % dir
299 | 297 |
300 | 298 |                 if repo.store._exists(i):
301 | 299 |                     newentries.add(i)
302 | 300 |                 if repo.store._exists(d):
303 | 301 |                     newentries.add(d)
304 | 302 |
305 | 303 |         addcount = len(newentries - oldentries)
306 | 304 |         removecount = len(oldentries - newentries)
307 | 305 |         for p in sorted(oldentries - newentries):
308 | 306 |             ui.write(_('removing %s\n') % p)
309 | 307 |         for p in sorted(newentries - oldentries):
310 | 308 |             ui.write(_('adding %s\n') % p)
311 | 309 |
312 | 310 |         if addcount or removecount:
313 | 311 |             ui.write(_('%d items added, %d removed from fncache\n') %
314 | 312 |                      (addcount, removecount))
315 | 313 |             fnc.entries = newentries
316 | 314 |             fnc._dirty = True
317 | 315 |
318 | 316 |             with repo.transaction('fncache') as tr:
319 | 317 |                 fnc.write(tr)
320 | 318 |         else:
321 | 319 |             ui.write(_('fncache already up to date\n'))
322 | 320 |
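
Aside: the add/remove report at the end is plain set arithmetic over store paths, for example (sample entries invented):

    oldentries = set(['data/a.i', 'data/b.i', 'data/stale.i'])
    newentries = set(['data/a.i', 'data/b.i', 'data/b.d'])
    print(sorted(oldentries - newentries))   # ['data/stale.i']  (removed)
    print(sorted(newentries - oldentries))   # ['data/b.d']      (added)
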
323 | 321 | def stripbmrevset(repo, mark):
324 | 322 |     """
325 | 323 |     The revset to strip when strip is called with -B mark
326 | 324 |
327 | 325 |     Needs to live here so extensions can use it and wrap it even when strip is
328 | 326 |     not enabled or not present on a box.
329 | 327 |     """
330 | 328 |     return repo.revs("ancestors(bookmark(%s)) - "
331 | 329 |                      "ancestors(head() and not bookmark(%s)) - "
332 | 330 |                      "ancestors(bookmark() and not bookmark(%s))",
333 | 331 |                      mark, mark, mark)
334 | 332 |
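
Aside, in words: take everything leading up to the bookmark, then spare whatever some other head or some other bookmark still reaches. Modeling each ancestors(...) term as a plain set makes the arithmetic visible (revision numbers invented for illustration):

    anc_mark = set([0, 1, 2, 5, 6])       # ancestors(bookmark(mark))
    anc_otherheads = set([0, 1, 2, 3])    # ancestors(head() and not bookmark(mark))
    anc_otherbms = set([0, 1])            # ancestors(bookmark() and not bookmark(mark))

    print(sorted(anc_mark - anc_otherheads - anc_otherbms))   # [5, 6]
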
335 | 333 | def deleteobsmarkers(obsstore, indices):
336 | 334 |     """Delete some obsmarkers from obsstore and return how many were deleted
337 | 335 |
338 | 336 |     'indices' is a list of ints which are the indices
339 | 337 |     of the markers to be deleted.
340 | 338 |
341 | 339 |     Every invocation of this function completely rewrites the obsstore file,
342 | 340 |     skipping the markers we want to be removed. The new temporary file is
343 | 341 |     created, remaining markers are written there and on .close() this file
344 | 342 |     gets atomically renamed to obsstore, thus guaranteeing consistency."""
345 | 343 |     if not indices:
346 | 344 |         # we don't want to rewrite the obsstore with the same content
347 | 345 |         return
348 | 346 |
349 | 347 |     left = []
350 | 348 |     current = obsstore._all
351 | 349 |     n = 0
352 | 350 |     for i, m in enumerate(current):
353 | 351 |         if i in indices:
354 | 352 |             n += 1
355 | 353 |             continue
356 | 354 |         left.append(m)
357 | 355 |
358 | 356 |     newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
359 | 357 |     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
360 | 358 |         newobsstorefile.write(bytes)
361 | 359 |     newobsstorefile.close()
362 | 360 |     return n
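
Aside: the consistency guarantee described in the docstring is the classic write-to-temp-then-rename pattern; svfs(..., atomictemp=True) implements it behind the returned file handle. A stand-alone sketch of the same idea (the function name, filename, and record format are invented):

    import os, tempfile

    def rewriteatomically(path, records):
        # Write the survivors to a temp file in the same directory, then
        # rename over the original: readers see either the old file or the
        # new one, never a half-written mix.
        dirname = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(dir=dirname)
        with os.fdopen(fd, 'wb') as f:
            for record in records:
                f.write(record)
        os.rename(tmp, path)              # atomic on POSIX filesystems

    rewriteatomically('obsstore.demo', [b'marker-1\n', b'marker-3\n'])
    print(open('obsstore.demo', 'rb').read())
    os.unlink('obsstore.demo')
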