Show More
@@ -1,179 +1,162 b'' | |||||
1 | # repair.py - functions for repository repair for mercurial |
|
1 | # repair.py - functions for repository repair for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> | |
4 | # Copyright 2007 Matt Mackall |
|
4 | # Copyright 2007 Matt Mackall | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | import changegroup, bookmarks |
|
9 | import changegroup, bookmarks | |
10 | from node import nullrev, short |
|
10 | from node import nullrev, short | |
11 | from i18n import _ |
|
11 | from i18n import _ | |
12 | import os |
|
12 | import os | |
13 |
|
13 | |||
14 |
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup

    The bundle is written to the repository's .hg/strip-backup directory
    (created on demand) as "<short node>-<suffix>.hg".

    repo     - the local repository being stripped
    bases    - base nodes of the set of revisions to save
    heads    - head nodes of the set of revisions to save
    node     - node being stripped; used only to name the backup file
    suffix   - filename suffix (e.g. 'backup' or 'temp')
    compress - if False, write an uncompressed bundle (used for the
               temporary partial bundle that is re-applied and deleted)

    Returns the full path of the bundle file written.
    """
    cg = repo.changegroupsubset(bases, heads, 'strip')
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
    if compress:
        bundletype = "HG10BZ"
    else:
        # uncompressed bundle: cheaper to write when it will be unlinked
        # right after being re-applied
        bundletype = "HG10UN"
    return changegroup.writebundle(cg, name, bundletype)
26 |
|
26 | |||
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip

    Walks every changeset from striprev to the repository tip and
    collects the names of all files touched by any of them.

    Returns a sorted list of filenames.
    """
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)
35 |
|
35 | |||
36 |
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation

    A revlog is truncated at the first entry whose linkrev is >= striprev.
    Any entry after that point whose linkrev is < striprev belongs to a
    changeset that will NOT be stripped but whose file/manifest data WILL
    be truncated away — those changesets are "broken" and must be saved
    in the partial bundle.

    repo     - the local repository
    files    - filenames affected by the strip (from _collectfiles)
    striprev - first changelog revision being stripped

    Generates the (changelog) revision numbers of broken changesets; the
    same rev may be yielded more than once if several revlogs break it.
    """
    def collectone(revlog):
        # yield the linkrevs broken by truncating this one revlog
        startrev = count = len(revlog)
        # find the truncation point of the revlog
        for i in xrange(count):
            lrev = revlog.linkrev(i)
            if lrev >= striprev:
                startrev = i + 1
                break

        # see if any revision after that point has a linkrev less than striprev
        # (those will be broken by strip)
        for i in xrange(startrev, count):
            lrev = revlog.linkrev(i)
            if lrev < striprev:
                yield lrev

    # the manifest revlog can break changesets too, not just filelogs
    for rev in collectone(repo.manifest):
        yield rev
    for fname in files:
        f = repo.file(fname)
        for rev in collectone(f):
            yield rev
|
|||
70 |
|
60 | |||
def strip(ui, repo, node, backup="all"):
    """strip a changeset and all its descendants from the repository

    ui     - the ui object used for status/warning output
    repo   - the local repository to strip
    node   - changelog node of the first revision to strip
    backup - 'all' writes a full backup bundle before stripping;
             'strip' keeps the (compressed) partial bundle on disk;
             any other value keeps no backup of the stripped revisions

    Revisions that are not descendants of node but were created after it
    are saved in a temporary bundle and re-applied after the truncation.
    """
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    striprev = cl.rev(node)

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(cl.descendants(striprev))
    tostrip.add(striprev)

    files = _collectfiles(repo, striprev)
    saverevs = set(_collectbrokencsets(repo, files, striprev))

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            # r survives the strip, so it must be saved in the bundle
            saverevs.add(r)
            # r's parents are no longer heads of the saved set
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        # a saved rev with a saved ancestor is not a base
        descendants = set(cl.descendants(*saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]

    # bookmarks pointing at stripped revisions must be moved afterwards
    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        backupfile = _bundle(repo, [node], cl.heads(), node, 'backup')
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            # truncate each touched revlog file back to its pre-strip size
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except:
            # deliberate bare except: abort the transaction, then re-raise
            tr.abort()
            raise

        if saveheads or savebases:
            # re-apply the saved (non-descendant) revisions
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        # move bookmarks that pointed at stripped revs to the working parent
        for m in updatebm:
            bm[m] = repo['.'].node()
        bookmarks.write(repo)

    except:
        # deliberate bare except: tell the user where the backup lives,
        # then re-raise so nothing is swallowed
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()
General Comments 0
You need to be logged in to leave comments.
Login now