Show More
@@ -1,136 +1,135 b'' | |||||
1 | # repair.py - functions for repository repair for mercurial |
|
1 | # repair.py - functions for repository repair for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> |
|
3 | # Copyright 2005, 2006 Chris Mason <mason@suse.com> | |
4 | # Copyright 2007 Matt Mackall |
|
4 | # Copyright 2007 Matt Mackall | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | import changegroup, os |
|
9 | import changegroup, os | |
10 | from node import nullrev, short |
|
10 | from node import nullrev, short | |
11 |
|
11 | |||
def _bundle(repo, bases, heads, node, suffix, extranodes=None):
    """Write a backup bundle holding the specified revisions.

    The bundle goes into the repository's "strip-backup" directory,
    named after the short hash of ``node`` plus ``suffix``.  Returns
    whatever changegroup.writebundle returns for the written file.
    """
    bundle = repo.changegroupsubset(bases, heads, 'strip', extranodes)
    # make sure the backup directory exists before writing into it
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    filename = "%s-%s" % (short(node), suffix)
    path = os.path.join(backupdir, filename)
    repo.ui.warn("saving bundle to %s\n" % path)
    return changegroup.writebundle(bundle, path, "HG10BZ")
21 |
|
21 | |||
22 | def _collectfiles(repo, striprev): |
|
22 | def _collectfiles(repo, striprev): | |
23 | """find out the filelogs affected by the strip""" |
|
23 | """find out the filelogs affected by the strip""" | |
24 | files = {} |
|
24 | files = {} | |
25 |
|
25 | |||
26 | for x in xrange(striprev, repo.changelog.count()): |
|
26 | for x in xrange(striprev, repo.changelog.count()): | |
27 | for name in repo.changectx(x).files(): |
|
27 | for name in repo.changectx(x).files(): | |
28 | if name in files: |
|
28 | if name in files: | |
29 | continue |
|
29 | continue | |
30 | files[name] = 1 |
|
30 | files[name] = 1 | |
31 |
|
31 | |||
32 | files = files.keys() |
|
32 | files = files.keys() | |
33 | files.sort() |
|
33 | files.sort() | |
34 | return files |
|
34 | return files | |
35 |
|
35 | |||
def _collectextranodes(repo, files, link):
    """return the nodes that have to be saved before the strip

    For the manifest and each filelog in 'files', find revisions that
    are stored before the revlog's truncation point but whose linkrev
    is >= 'link' territory in reverse: entries after the truncation
    point whose linkrev is < 'link' would be lost by the truncation,
    so their (node, linknode) pairs are collected for re-adding later.
    """
    def collectone(revlog):
        # Collect (node, changelog-linknode) pairs from 'revlog' that
        # sit after its truncation point yet link to changesets that
        # survive the strip.
        extra = []
        startrev = count = revlog.count()
        # find the truncation point of the revlog
        for i in xrange(0, count):
            node = revlog.node(i)
            lrev = revlog.linkrev(node)
            if lrev >= link:
                startrev = i + 1
                break

        # see if any revision after that point has a linkrev less than link
        # (we have to manually save these guys)
        for i in xrange(startrev, count):
            node = revlog.node(i)
            lrev = revlog.linkrev(node)
            if lrev < link:
                # NOTE: 'cl' is a free variable from the enclosing
                # scope; it is bound below, before collectone is
                # first called, so this resolves correctly at runtime.
                extra.append((node, cl.node(lrev)))

        return extra

    extranodes = {}
    cl = repo.changelog
    extra = collectone(repo.manifest)
    if extra:
        # The integer key 1 (rather than a file name) marks the
        # manifest's extra nodes -- presumably recognized as such by
        # the changegroup consumer; confirm against changegroupsubset.
        extranodes[1] = extra
    for fname in files:
        f = repo.file(fname)
        extra = collectone(f)
        if extra:
            extranodes[fname] = extra

    return extranodes
71 |
|
71 | |||
def strip(ui, repo, node, backup="all"):
    """Remove changeset 'node' and its descendants from the repository.

    backup controls bundle handling: "all" additionally writes a
    'backup' bundle of everything stripped; the temporary bundle used
    to restore unrelated descendants is deleted afterwards unless
    backup == "strip".
    """
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    striprev = cl.rev(node)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = {striprev: 1}
    saveheads = {}
    savebases = []
    for r in xrange(striprev + 1, cl.count()):
        parents = cl.parentrevs(r)
        if parents[0] in tostrip or parents[1] in tostrip:
            # r is a descendant of striprev
            tostrip[r] = 1
            # if this is a merge and one of the parents does not descend
            # from striprev, mark that parent as a savehead.
            if parents[1] != nullrev:
                for p in parents:
                    if p not in tostrip and p > striprev:
                        saveheads[p] = 1
        else:
            # if no parents of this revision will be stripped, mark it as
            # a savebase
            if parents[0] < striprev and parents[1] < striprev:
                savebases.append(cl.node(r))

            # r survives the strip: it supersedes any of its parents
            # as a head of the saved set
            for p in parents:
                if p in saveheads:
                    del saveheads[p]
            saveheads[r] = 1

    saveheads = [cl.node(r) for r in saveheads]
    files = _collectfiles(repo, striprev)

    # revlog entries past the truncation point that link to surviving
    # changesets must be carried through the temporary bundle
    extranodes = _collectextranodes(repo, files, striprev)

    # create a changegroup for all the branches we need to keep
    if backup == "all":
        _bundle(repo, [node], cl.heads(), node, 'backup')
    if saveheads or extranodes:
        # chgrpfile is only bound (and later read) under this same
        # condition, so the restore block below is safe
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            extranodes)

    # truncate the changelog first, then the manifest and filelogs
    cl.strip(striprev)
    repo.manifest.strip(striprev)
    for name in files:
        f = repo.file(name)
        f.strip(striprev)

    # re-add the saved (non-descendant) branches from the temp bundle
    if saveheads or extranodes:
        ui.status("adding branch\n")
        f = open(chgrpfile, "rb")
        gen = changegroup.readbundle(f, chgrpfile)
        repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
        f.close()
        if backup != "strip":
            os.unlink(chgrpfile)
136 |
|
135 |
General Comments 0
You need to be logged in to leave comments.
Login now