@@ -1,678 +1,678 | |||||
1 | # Patch transplanting extension for Mercurial |
|
1 | # Patch transplanting extension for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com> |
|
3 | # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''command to transplant changesets from another branch |
|
8 | '''command to transplant changesets from another branch | |
9 |
|
9 | |||
10 | This extension allows you to transplant patches from another branch. |
|
10 | This extension allows you to transplant patches from another branch. | |
11 |
|
11 | |||
12 | Transplanted patches are recorded in .hg/transplant/transplants, as a |
|
12 | Transplanted patches are recorded in .hg/transplant/transplants, as a | |
13 | map from a changeset hash to its hash in the source repository. |
|
13 | map from a changeset hash to its hash in the source repository. | |
14 | ''' |
|
14 | ''' | |
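As an illustration of the on-disk format described in the docstring above, here is a minimal standalone sketch (not part of this changeset; the function name and default path are only examples) that reads the transplants file back into a map keyed by the source hash:

    # each line of .hg/transplant/transplants is "<local hex hash>:<source hex hash>"
    from binascii import unhexlify

    def read_transplant_map(path='.hg/transplant/transplants'):
        """Return {source_node: [local_node, ...]}, mirroring transplants.read() below."""
        mapping = {}
        with open(path) as fp:
            for line in fp:
                lnode, rnode = (unhexlify(p) for p in line.strip().split(':'))
                mapping.setdefault(rnode, []).append(lnode)
        return mapping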
15 |
|
15 | |||
16 | from mercurial.i18n import _ |
|
16 | from mercurial.i18n import _ | |
17 | import os, tempfile |
|
17 | import os, tempfile | |
18 | from mercurial.node import short |
|
18 | from mercurial.node import short | |
19 | from mercurial import bundlerepo, hg, merge, match |
|
19 | from mercurial import bundlerepo, hg, merge, match | |
20 | from mercurial import patch, revlog, scmutil, util, error, cmdutil |
|
20 | from mercurial import patch, revlog, scmutil, util, error, cmdutil | |
21 | from mercurial import revset, templatekw |
|
21 | from mercurial import revset, templatekw | |
22 |
|
22 | |||
23 | class TransplantError(error.Abort): |
|
23 | class TransplantError(error.Abort): | |
24 | pass |
|
24 | pass | |
25 |
|
25 | |||
26 | cmdtable = {} |
|
26 | cmdtable = {} | |
27 | command = cmdutil.command(cmdtable) |
|
27 | command = cmdutil.command(cmdtable) | |
28 | testedwith = 'internal' |
|
28 | testedwith = 'internal' | |
29 |
|
29 | |||
30 | class transplantentry(object): |
|
30 | class transplantentry(object): | |
31 | def __init__(self, lnode, rnode): |
|
31 | def __init__(self, lnode, rnode): | |
32 | self.lnode = lnode |
|
32 | self.lnode = lnode | |
33 | self.rnode = rnode |
|
33 | self.rnode = rnode | |
34 |
|
34 | |||
35 | class transplants(object): |
|
35 | class transplants(object): | |
36 | def __init__(self, path=None, transplantfile=None, opener=None): |
|
36 | def __init__(self, path=None, transplantfile=None, opener=None): | |
37 | self.path = path |
|
37 | self.path = path | |
38 | self.transplantfile = transplantfile |
|
38 | self.transplantfile = transplantfile | |
39 | self.opener = opener |
|
39 | self.opener = opener | |
40 |
|
40 | |||
41 | if not opener: |
|
41 | if not opener: | |
42 | self.opener = scmutil.opener(self.path) |
|
42 | self.opener = scmutil.opener(self.path) | |
43 | self.transplants = {} |
|
43 | self.transplants = {} | |
44 | self.dirty = False |
|
44 | self.dirty = False | |
45 | self.read() |
|
45 | self.read() | |
46 |
|
46 | |||
47 | def read(self): |
|
47 | def read(self): | |
48 | abspath = os.path.join(self.path, self.transplantfile) |
|
48 | abspath = os.path.join(self.path, self.transplantfile) | |
49 | if self.transplantfile and os.path.exists(abspath): |
|
49 | if self.transplantfile and os.path.exists(abspath): | |
50 | for line in self.opener.read(self.transplantfile).splitlines(): |
|
50 | for line in self.opener.read(self.transplantfile).splitlines(): | |
51 | lnode, rnode = map(revlog.bin, line.split(':')) |
|
51 | lnode, rnode = map(revlog.bin, line.split(':')) | |
52 | list = self.transplants.setdefault(rnode, []) |
|
52 | list = self.transplants.setdefault(rnode, []) | |
53 | list.append(transplantentry(lnode, rnode)) |
|
53 | list.append(transplantentry(lnode, rnode)) | |
54 |
|
54 | |||
55 | def write(self): |
|
55 | def write(self): | |
56 | if self.dirty and self.transplantfile: |
|
56 | if self.dirty and self.transplantfile: | |
57 | if not os.path.isdir(self.path): |
|
57 | if not os.path.isdir(self.path): | |
58 | os.mkdir(self.path) |
|
58 | os.mkdir(self.path) | |
59 | fp = self.opener(self.transplantfile, 'w') |
|
59 | fp = self.opener(self.transplantfile, 'w') | |
60 | for list in self.transplants.itervalues(): |
|
60 | for list in self.transplants.itervalues(): | |
61 | for t in list: |
|
61 | for t in list: | |
62 | l, r = map(revlog.hex, (t.lnode, t.rnode)) |
|
62 | l, r = map(revlog.hex, (t.lnode, t.rnode)) | |
63 | fp.write(l + ':' + r + '\n') |
|
63 | fp.write(l + ':' + r + '\n') | |
64 | fp.close() |
|
64 | fp.close() | |
65 | self.dirty = False |
|
65 | self.dirty = False | |
66 |
|
66 | |||
67 | def get(self, rnode): |
|
67 | def get(self, rnode): | |
68 | return self.transplants.get(rnode) or [] |
|
68 | return self.transplants.get(rnode) or [] | |
69 |
|
69 | |||
70 | def set(self, lnode, rnode): |
|
70 | def set(self, lnode, rnode): | |
71 | list = self.transplants.setdefault(rnode, []) |
|
71 | list = self.transplants.setdefault(rnode, []) | |
72 | list.append(transplantentry(lnode, rnode)) |
|
72 | list.append(transplantentry(lnode, rnode)) | |
73 | self.dirty = True |
|
73 | self.dirty = True | |
74 |
|
74 | |||
75 | def remove(self, transplant): |
|
75 | def remove(self, transplant): | |
76 | list = self.transplants.get(transplant.rnode) |
|
76 | list = self.transplants.get(transplant.rnode) | |
77 | if list: |
|
77 | if list: | |
78 | del list[list.index(transplant)] |
|
78 | del list[list.index(transplant)] | |
79 | self.dirty = True |
|
79 | self.dirty = True | |
80 |
|
80 | |||
81 | class transplanter(object): |
|
81 | class transplanter(object): | |
82 | def __init__(self, ui, repo): |
|
82 | def __init__(self, ui, repo): | |
83 | self.ui = ui |
|
83 | self.ui = ui | |
84 | self.path = repo.join('transplant') |
|
84 | self.path = repo.join('transplant') | |
85 | self.opener = scmutil.opener(self.path) |
|
85 | self.opener = scmutil.opener(self.path) | |
86 | self.transplants = transplants(self.path, 'transplants', |
|
86 | self.transplants = transplants(self.path, 'transplants', | |
87 | opener=self.opener) |
|
87 | opener=self.opener) | |
88 | self.editor = None |
|
88 | self.editor = None | |
89 |
|
89 | |||
90 | def applied(self, repo, node, parent): |
|
90 | def applied(self, repo, node, parent): | |
91 | '''returns True if a node is already an ancestor of parent |
|
91 | '''returns True if a node is already an ancestor of parent | |
92 | or is parent or has already been transplanted''' |
|
92 | or is parent or has already been transplanted''' | |
93 | if hasnode(repo, parent): |
|
93 | if hasnode(repo, parent): | |
94 | parentrev = repo.changelog.rev(parent) |
|
94 | parentrev = repo.changelog.rev(parent) | |
95 | if hasnode(repo, node): |
|
95 | if hasnode(repo, node): | |
96 | rev = repo.changelog.rev(node) |
|
96 | rev = repo.changelog.rev(node) | |
97 | reachable = repo.changelog.ancestors([parentrev], rev, |
|
97 | reachable = repo.changelog.ancestors([parentrev], rev, | |
98 | inclusive=True) |
|
98 | inclusive=True) | |
99 | if rev in reachable: |
|
99 | if rev in reachable: | |
100 | return True |
|
100 | return True | |
101 | for t in self.transplants.get(node): |
|
101 | for t in self.transplants.get(node): | |
102 | # it might have been stripped |
|
102 | # it might have been stripped | |
103 | if not hasnode(repo, t.lnode): |
|
103 | if not hasnode(repo, t.lnode): | |
104 | self.transplants.remove(t) |
|
104 | self.transplants.remove(t) | |
105 | return False |
|
105 | return False | |
106 | lnoderev = repo.changelog.rev(t.lnode) |
|
106 | lnoderev = repo.changelog.rev(t.lnode) | |
107 | if lnoderev in repo.changelog.ancestors([parentrev], lnoderev, |
|
107 | if lnoderev in repo.changelog.ancestors([parentrev], lnoderev, | |
108 | inclusive=True): |
|
108 | inclusive=True): | |
109 | return True |
|
109 | return True | |
110 | return False |
|
110 | return False | |
111 |
|
111 | |||
112 | def apply(self, repo, source, revmap, merges, opts={}): |
|
112 | def apply(self, repo, source, revmap, merges, opts={}): | |
113 | '''apply the revisions in revmap one by one in revision order''' |
|
113 | '''apply the revisions in revmap one by one in revision order''' | |
114 | revs = sorted(revmap) |
|
114 | revs = sorted(revmap) | |
115 | p1, p2 = repo.dirstate.parents() |
|
115 | p1, p2 = repo.dirstate.parents() | |
116 | pulls = [] |
|
116 | pulls = [] | |
117 | diffopts = patch.diffopts(self.ui, opts) |
|
117 | diffopts = patch.diffopts(self.ui, opts) | |
118 | diffopts.git = True |
|
118 | diffopts.git = True | |
119 |
|
119 | |||
120 | lock = wlock = tr = None |
|
120 | lock = wlock = tr = None | |
121 | try: |
|
121 | try: | |
122 | wlock = repo.wlock() |
|
122 | wlock = repo.wlock() | |
123 | lock = repo.lock() |
|
123 | lock = repo.lock() | |
124 | tr = repo.transaction('transplant') |
|
124 | tr = repo.transaction('transplant') | |
125 | for rev in revs: |
|
125 | for rev in revs: | |
126 | node = revmap[rev] |
|
126 | node = revmap[rev] | |
127 | revstr = '%s:%s' % (rev, short(node)) |
|
127 | revstr = '%s:%s' % (rev, short(node)) | |
128 |
|
128 | |||
129 | if self.applied(repo, node, p1): |
|
129 | if self.applied(repo, node, p1): | |
130 | self.ui.warn(_('skipping already applied revision %s\n') % |
|
130 | self.ui.warn(_('skipping already applied revision %s\n') % | |
131 | revstr) |
|
131 | revstr) | |
132 | continue |
|
132 | continue | |
133 |
|
133 | |||
134 | parents = source.changelog.parents(node) |
|
134 | parents = source.changelog.parents(node) | |
135 | if not (opts.get('filter') or opts.get('log')): |
|
135 | if not (opts.get('filter') or opts.get('log')): | |
136 | # If the changeset parent is the same as the |
|
136 | # If the changeset parent is the same as the | |
137 | # wdir's parent, just pull it. |
|
137 | # wdir's parent, just pull it. | |
138 | if parents[0] == p1: |
|
138 | if parents[0] == p1: | |
139 | pulls.append(node) |
|
139 | pulls.append(node) | |
140 | p1 = node |
|
140 | p1 = node | |
141 | continue |
|
141 | continue | |
142 | if pulls: |
|
142 | if pulls: | |
143 | if source != repo: |
|
143 | if source != repo: | |
144 | repo.pull(source.peer(), heads=pulls) |
|
144 | repo.pull(source.peer(), heads=pulls) | |
145 | merge.update(repo, pulls[-1], False, False, None) |
|
145 | merge.update(repo, pulls[-1], False, False, None) | |
146 | p1, p2 = repo.dirstate.parents() |
|
146 | p1, p2 = repo.dirstate.parents() | |
147 | pulls = [] |
|
147 | pulls = [] | |
148 |
|
148 | |||
149 | domerge = False |
|
149 | domerge = False | |
150 | if node in merges: |
|
150 | if node in merges: | |
151 | # pulling all the merge revs at once would mean we |
|
151 | # pulling all the merge revs at once would mean we | |
152 | # couldn't transplant after the latest even if |
|
152 | # couldn't transplant after the latest even if | |
153 | # transplants before them fail. |
|
153 | # transplants before them fail. | |
154 | domerge = True |
|
154 | domerge = True | |
155 | if not hasnode(repo, node): |
|
155 | if not hasnode(repo, node): | |
156 | repo.pull(source, heads=[node]) |
|
156 | repo.pull(source, heads=[node]) | |
157 |
|
157 | |||
158 | skipmerge = False |
|
158 | skipmerge = False | |
159 | if parents[1] != revlog.nullid: |
|
159 | if parents[1] != revlog.nullid: | |
160 | if not opts.get('parent'): |
|
160 | if not opts.get('parent'): | |
161 | self.ui.note(_('skipping merge changeset %s:%s\n') |
|
161 | self.ui.note(_('skipping merge changeset %s:%s\n') | |
162 | % (rev, short(node))) |
|
162 | % (rev, short(node))) | |
163 | skipmerge = True |
|
163 | skipmerge = True | |
164 | else: |
|
164 | else: | |
165 | parent = source.lookup(opts['parent']) |
|
165 | parent = source.lookup(opts['parent']) | |
166 | if parent not in parents: |
|
166 | if parent not in parents: | |
167 | raise util.Abort(_('%s is not a parent of %s') % |
|
167 | raise util.Abort(_('%s is not a parent of %s') % | |
168 | (short(parent), short(node))) |
|
168 | (short(parent), short(node))) | |
169 | else: |
|
169 | else: | |
170 | parent = parents[0] |
|
170 | parent = parents[0] | |
171 |
|
171 | |||
172 | if skipmerge: |
|
172 | if skipmerge: | |
173 | patchfile = None |
|
173 | patchfile = None | |
174 | else: |
|
174 | else: | |
175 | fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-') |
|
175 | fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-') | |
176 | fp = os.fdopen(fd, 'w') |
|
176 | fp = os.fdopen(fd, 'w') | |
177 | gen = patch.diff(source, parent, node, opts=diffopts) |
|
177 | gen = patch.diff(source, parent, node, opts=diffopts) | |
178 | for chunk in gen: |
|
178 | for chunk in gen: | |
179 | fp.write(chunk) |
|
179 | fp.write(chunk) | |
180 | fp.close() |
|
180 | fp.close() | |
181 |
|
181 | |||
182 | del revmap[rev] |
|
182 | del revmap[rev] | |
183 | if patchfile or domerge: |
|
183 | if patchfile or domerge: | |
184 | try: |
|
184 | try: | |
185 | try: |
|
185 | try: | |
186 | n = self.applyone(repo, node, |
|
186 | n = self.applyone(repo, node, | |
187 | source.changelog.read(node), |
|
187 | source.changelog.read(node), | |
188 | patchfile, merge=domerge, |
|
188 | patchfile, merge=domerge, | |
189 | log=opts.get('log'), |
|
189 | log=opts.get('log'), | |
190 | filter=opts.get('filter')) |
|
190 | filter=opts.get('filter')) | |
191 | except TransplantError: |
|
191 | except TransplantError: | |
192 | # Do not rollback, it is up to the user to |
|
192 | # Do not rollback, it is up to the user to | |
193 | # fix the merge or cancel everything |
|
193 | # fix the merge or cancel everything | |
194 | tr.close() |
|
194 | tr.close() | |
195 | raise |
|
195 | raise | |
196 | if n and domerge: |
|
196 | if n and domerge: | |
197 | self.ui.status(_('%s merged at %s\n') % (revstr, |
|
197 | self.ui.status(_('%s merged at %s\n') % (revstr, | |
198 | short(n))) |
|
198 | short(n))) | |
199 | elif n: |
|
199 | elif n: | |
200 | self.ui.status(_('%s transplanted to %s\n') |
|
200 | self.ui.status(_('%s transplanted to %s\n') | |
201 | % (short(node), |
|
201 | % (short(node), | |
202 | short(n))) |
|
202 | short(n))) | |
203 | finally: |
|
203 | finally: | |
204 | if patchfile: |
|
204 | if patchfile: | |
205 | os.unlink(patchfile) |
|
205 | os.unlink(patchfile) | |
206 | tr.close() |
|
206 | tr.close() | |
207 | if pulls: |
|
207 | if pulls: | |
208 | repo.pull(source.peer(), heads=pulls) |
|
208 | repo.pull(source.peer(), heads=pulls) | |
209 | merge.update(repo, pulls[-1], False, False, None) |
|
209 | merge.update(repo, pulls[-1], False, False, None) | |
210 | finally: |
|
210 | finally: | |
211 | self.saveseries(revmap, merges) |
|
211 | self.saveseries(revmap, merges) | |
212 | self.transplants.write() |
|
212 | self.transplants.write() | |
213 | if tr: |
|
213 | if tr: | |
214 | tr.release() |
|
214 | tr.release() | |
215 | lock.release() |
|
215 | lock.release() | |
216 | wlock.release() |
|
216 | wlock.release() | |
217 |
|
217 | |||
218 | def filter(self, filter, node, changelog, patchfile): |
|
218 | def filter(self, filter, node, changelog, patchfile): | |
219 | '''arbitrarily rewrite changeset before applying it''' |
|
219 | '''arbitrarily rewrite changeset before applying it''' | |
220 |
|
220 | |||
221 | self.ui.status(_('filtering %s\n') % patchfile) |
|
221 | self.ui.status(_('filtering %s\n') % patchfile) | |
222 | user, date, msg = (changelog[1], changelog[2], changelog[4]) |
|
222 | user, date, msg = (changelog[1], changelog[2], changelog[4]) | |
223 | fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-') |
|
223 | fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-') | |
224 | fp = os.fdopen(fd, 'w') |
|
224 | fp = os.fdopen(fd, 'w') | |
225 | fp.write("# HG changeset patch\n") |
|
225 | fp.write("# HG changeset patch\n") | |
226 | fp.write("# User %s\n" % user) |
|
226 | fp.write("# User %s\n" % user) | |
227 | fp.write("# Date %d %d\n" % date) |
|
227 | fp.write("# Date %d %d\n" % date) | |
228 | fp.write(msg + '\n') |
|
228 | fp.write(msg + '\n') | |
229 | fp.close() |
|
229 | fp.close() | |
230 |
|
230 | |||
231 | try: |
|
231 | try: | |
232 | util.system('%s %s %s' % (filter, util.shellquote(headerfile), |
|
232 | util.system('%s %s %s' % (filter, util.shellquote(headerfile), | |
233 | util.shellquote(patchfile)), |
|
233 | util.shellquote(patchfile)), | |
234 | environ={'HGUSER': changelog[1], |
|
234 | environ={'HGUSER': changelog[1], | |
235 | 'HGREVISION': revlog.hex(node), |
|
235 | 'HGREVISION': revlog.hex(node), | |
236 | }, |
|
236 | }, | |
237 | onerr=util.Abort, errprefix=_('filter failed'), |
|
237 | onerr=util.Abort, errprefix=_('filter failed'), | |
238 | out=self.ui.fout) |
|
238 | out=self.ui.fout) | |
239 | user, date, msg = self.parselog(file(headerfile))[1:4] |
|
239 | user, date, msg = self.parselog(file(headerfile))[1:4] | |
240 | finally: |
|
240 | finally: | |
241 | os.unlink(headerfile) |
|
241 | os.unlink(headerfile) | |
242 |
|
242 | |||
243 | return (user, date, msg) |
|
243 | return (user, date, msg) | |
244 |
|
244 | |||
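The filter() method above invokes the --filter command as "<filter> <headerfile> <patchfile>", exports HGUSER and HGREVISION in the environment, and re-reads user, date and message from the header file afterwards. A hypothetical filter script (a sketch under those assumptions, not part of this changeset) could therefore rewrite the commit message in place:

    #!/usr/bin/env python
    # argv[1]: saved header ("# HG changeset patch", "# User ...", "# Date ...", message)
    # argv[2]: the patch itself (left untouched here)
    import sys

    headerfile = sys.argv[1]
    with open(headerfile) as fp:
        lines = fp.read().splitlines(True)
    with open(headerfile, 'w') as fp:
        tagged = False
        for line in lines:
            if not tagged and not line.startswith('#'):
                line = '[transplanted] ' + line   # prefix the first message line
                tagged = True
            fp.write(line)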
245 | def applyone(self, repo, node, cl, patchfile, merge=False, log=False, |
|
245 | def applyone(self, repo, node, cl, patchfile, merge=False, log=False, | |
246 | filter=None): |
|
246 | filter=None): | |
247 | '''apply the patch in patchfile to the repository as a transplant''' |
|
247 | '''apply the patch in patchfile to the repository as a transplant''' | |
248 | (manifest, user, (time, timezone), files, message) = cl[:5] |
|
248 | (manifest, user, (time, timezone), files, message) = cl[:5] | |
249 | date = "%d %d" % (time, timezone) |
|
249 | date = "%d %d" % (time, timezone) | |
250 | extra = {'transplant_source': node} |
|
250 | extra = {'transplant_source': node} | |
251 | if filter: |
|
251 | if filter: | |
252 | (user, date, message) = self.filter(filter, node, cl, patchfile) |
|
252 | (user, date, message) = self.filter(filter, node, cl, patchfile) | |
253 |
|
253 | |||
254 | if log: |
|
254 | if log: | |
255 | # we don't translate messages inserted into commits |
|
255 | # we don't translate messages inserted into commits | |
256 | message += '\n(transplanted from %s)' % revlog.hex(node) |
|
256 | message += '\n(transplanted from %s)' % revlog.hex(node) | |
257 |
|
257 | |||
258 | self.ui.status(_('applying %s\n') % short(node)) |
|
258 | self.ui.status(_('applying %s\n') % short(node)) | |
259 | self.ui.note('%s %s\n%s\n' % (user, date, message)) |
|
259 | self.ui.note('%s %s\n%s\n' % (user, date, message)) | |
260 |
|
260 | |||
261 | if not patchfile and not merge: |
|
261 | if not patchfile and not merge: | |
262 | raise util.Abort(_('can only omit patchfile if merging')) |
|
262 | raise util.Abort(_('can only omit patchfile if merging')) | |
263 | if patchfile: |
|
263 | if patchfile: | |
264 | try: |
|
264 | try: | |
265 | files = set() |
|
265 | files = set() | |
266 | patch.patch(self.ui, repo, patchfile, files=files, eolmode=None) |
|
266 | patch.patch(self.ui, repo, patchfile, files=files, eolmode=None) | |
267 | files = list(files) |
|
267 | files = list(files) | |
268 | except Exception, inst: |
|
268 | except Exception, inst: | |
269 | seriespath = os.path.join(self.path, 'series') |
|
269 | seriespath = os.path.join(self.path, 'series') | |
270 | if os.path.exists(seriespath): |
|
270 | if os.path.exists(seriespath): | |
271 | os.unlink(seriespath) |
|
271 | os.unlink(seriespath) | |
272 | p1 = repo.dirstate.p1() |
|
272 | p1 = repo.dirstate.p1() | |
273 | p2 = node |
|
273 | p2 = node | |
274 | self.log(user, date, message, p1, p2, merge=merge) |
|
274 | self.log(user, date, message, p1, p2, merge=merge) | |
275 | self.ui.write(str(inst) + '\n') |
|
275 | self.ui.write(str(inst) + '\n') | |
276 | raise TransplantError(_('fix up the merge and run ' |
|
276 | raise TransplantError(_('fix up the merge and run ' | |
277 | 'hg transplant --continue')) |
|
277 | 'hg transplant --continue')) | |
278 | else: |
|
278 | else: | |
279 | files = None |
|
279 | files = None | |
280 | if merge: |
|
280 | if merge: | |
281 | p1, p2 = repo.dirstate.parents() |
|
281 | p1, p2 = repo.dirstate.parents() | |
282 | repo.setparents(p1, node) |
|
282 | repo.setparents(p1, node) | |
283 | m = match.always(repo.root, '') |
|
283 | m = match.always(repo.root, '') | |
284 | else: |
|
284 | else: | |
285 | m = match.exact(repo.root, '', files) |
|
285 | m = match.exact(repo.root, '', files) | |
286 |
|
286 | |||
287 | n = repo.commit(message, user, date, extra=extra, match=m, |
|
287 | n = repo.commit(message, user, date, extra=extra, match=m, | |
288 | editor=self.editor) |
|
288 | editor=self.editor) | |
289 | if not n: |
|
289 | if not n: | |
290 | self.ui.warn(_('skipping emptied changeset %s\n') % short(node)) |
|
290 | self.ui.warn(_('skipping emptied changeset %s\n') % short(node)) | |
291 | return None |
|
291 | return None | |
292 | if not merge: |
|
292 | if not merge: | |
293 | self.transplants.set(n, node) |
|
293 | self.transplants.set(n, node) | |
294 |
|
294 | |||
295 | return n |
|
295 | return n | |
296 |
|
296 | |||
297 | def resume(self, repo, source, opts |
|
297 | def resume(self, repo, source, opts): |
298 | '''recover last transaction and apply remaining changesets''' |
|
298 | '''recover last transaction and apply remaining changesets''' | |
299 | if os.path.exists(os.path.join(self.path, 'journal')): |
|
299 | if os.path.exists(os.path.join(self.path, 'journal')): | |
300 | n, node = self.recover(repo) |
|
300 | n, node = self.recover(repo, opts) | |
301 | self.ui.status(_('%s transplanted as %s\n') % (short(node), |
|
301 | self.ui.status(_('%s transplanted as %s\n') % (short(node), | |
302 | short(n))) |
|
302 | short(n))) | |
303 | seriespath = os.path.join(self.path, 'series') |
|
303 | seriespath = os.path.join(self.path, 'series') | |
304 | if not os.path.exists(seriespath): |
|
304 | if not os.path.exists(seriespath): | |
305 | self.transplants.write() |
|
305 | self.transplants.write() | |
306 | return |
|
306 | return | |
307 | nodes, merges = self.readseries() |
|
307 | nodes, merges = self.readseries() | |
308 | revmap = {} |
|
308 | revmap = {} | |
309 | for n in nodes: |
|
309 | for n in nodes: | |
310 | revmap[source.changelog.rev(n)] = n |
|
310 | revmap[source.changelog.rev(n)] = n | |
311 | os.unlink(seriespath) |
|
311 | os.unlink(seriespath) | |
312 |
|
312 | |||
313 | self.apply(repo, source, revmap, merges, opts) |
|
313 | self.apply(repo, source, revmap, merges, opts) | |
314 |
|
314 | |||
315 | def recover(self, repo): |
|
315 | def recover(self, repo, opts): | |
316 | '''commit working directory using journal metadata''' |
|
316 | '''commit working directory using journal metadata''' | |
317 | node, user, date, message, parents = self.readlog() |
|
317 | node, user, date, message, parents = self.readlog() | |
318 | merge = False |
|
318 | merge = False | |
319 |
|
319 | |||
320 | if not user or not date or not message or not parents[0]: |
|
320 | if not user or not date or not message or not parents[0]: | |
321 | raise util.Abort(_('transplant log file is corrupt')) |
|
321 | raise util.Abort(_('transplant log file is corrupt')) | |
322 |
|
322 | |||
323 | parent = parents[0] |
|
323 | parent = parents[0] | |
324 | if len(parents) > 1: |
|
324 | if len(parents) > 1: | |
325 | if opts.get('parent'): |
|
325 | if opts.get('parent'): | |
326 | parent = source.lookup(opts['parent']) |
|
326 | parent = source.lookup(opts['parent']) | |
327 | if parent not in parents: |
|
327 | if parent not in parents: | |
328 | raise util.Abort(_('%s is not a parent of %s') % |
|
328 | raise util.Abort(_('%s is not a parent of %s') % | |
329 | (short(parent), short(node))) |
|
329 | (short(parent), short(node))) | |
330 | else: |
|
330 | else: | |
331 | merge = True |
|
331 | merge = True | |
332 |
|
332 | |||
333 | extra = {'transplant_source': node} |
|
333 | extra = {'transplant_source': node} | |
334 | wlock = repo.wlock() |
|
334 | wlock = repo.wlock() | |
335 | try: |
|
335 | try: | |
336 | p1, p2 = repo.dirstate.parents() |
|
336 | p1, p2 = repo.dirstate.parents() | |
337 | if p1 != parent: |
|
337 | if p1 != parent: | |
338 | raise util.Abort( |
|
338 | raise util.Abort( | |
339 | _('working dir not at transplant parent %s') % |
|
339 | _('working dir not at transplant parent %s') % | |
340 | revlog.hex(parent)) |
|
340 | revlog.hex(parent)) | |
341 | if merge: |
|
341 | if merge: | |
342 | repo.setparents(p1, parents[1]) |
|
342 | repo.setparents(p1, parents[1]) | |
343 | n = repo.commit(message, user, date, extra=extra, |
|
343 | n = repo.commit(message, user, date, extra=extra, | |
344 | editor=self.editor) |
|
344 | editor=self.editor) | |
345 | if not n: |
|
345 | if not n: | |
346 | raise util.Abort(_('commit failed')) |
|
346 | raise util.Abort(_('commit failed')) | |
347 | if not merge: |
|
347 | if not merge: | |
348 | self.transplants.set(n, node) |
|
348 | self.transplants.set(n, node) | |
349 | self.unlog() |
|
349 | self.unlog() | |
350 |
|
350 | |||
351 | return n, node |
|
351 | return n, node | |
352 | finally: |
|
352 | finally: | |
353 | wlock.release() |
|
353 | wlock.release() | |
354 |
|
354 | |||
355 | def readseries(self): |
|
355 | def readseries(self): | |
356 | nodes = [] |
|
356 | nodes = [] | |
357 | merges = [] |
|
357 | merges = [] | |
358 | cur = nodes |
|
358 | cur = nodes | |
359 | for line in self.opener.read('series').splitlines(): |
|
359 | for line in self.opener.read('series').splitlines(): | |
360 | if line.startswith('# Merges'): |
|
360 | if line.startswith('# Merges'): | |
361 | cur = merges |
|
361 | cur = merges | |
362 | continue |
|
362 | continue | |
363 | cur.append(revlog.bin(line)) |
|
363 | cur.append(revlog.bin(line)) | |
364 |
|
364 | |||
365 | return (nodes, merges) |
|
365 | return (nodes, merges) | |
366 |
|
366 | |||
367 | def saveseries(self, revmap, merges): |
|
367 | def saveseries(self, revmap, merges): | |
368 | if not revmap: |
|
368 | if not revmap: | |
369 | return |
|
369 | return | |
370 |
|
370 | |||
371 | if not os.path.isdir(self.path): |
|
371 | if not os.path.isdir(self.path): | |
372 | os.mkdir(self.path) |
|
372 | os.mkdir(self.path) | |
373 | series = self.opener('series', 'w') |
|
373 | series = self.opener('series', 'w') | |
374 | for rev in sorted(revmap): |
|
374 | for rev in sorted(revmap): | |
375 | series.write(revlog.hex(revmap[rev]) + '\n') |
|
375 | series.write(revlog.hex(revmap[rev]) + '\n') | |
376 | if merges: |
|
376 | if merges: | |
377 | series.write('# Merges\n') |
|
377 | series.write('# Merges\n') | |
378 | for m in merges: |
|
378 | for m in merges: | |
379 | series.write(revlog.hex(m) + '\n') |
|
379 | series.write(revlog.hex(m) + '\n') | |
380 | series.close() |
|
380 | series.close() | |
381 |
|
381 | |||
382 | def parselog(self, fp): |
|
382 | def parselog(self, fp): | |
383 | parents = [] |
|
383 | parents = [] | |
384 | message = [] |
|
384 | message = [] | |
385 | node = revlog.nullid |
|
385 | node = revlog.nullid | |
386 | inmsg = False |
|
386 | inmsg = False | |
387 | user = None |
|
387 | user = None | |
388 | date = None |
|
388 | date = None | |
389 | for line in fp.read().splitlines(): |
|
389 | for line in fp.read().splitlines(): | |
390 | if inmsg: |
|
390 | if inmsg: | |
391 | message.append(line) |
|
391 | message.append(line) | |
392 | elif line.startswith('# User '): |
|
392 | elif line.startswith('# User '): | |
393 | user = line[7:] |
|
393 | user = line[7:] | |
394 | elif line.startswith('# Date '): |
|
394 | elif line.startswith('# Date '): | |
395 | date = line[7:] |
|
395 | date = line[7:] | |
396 | elif line.startswith('# Node ID '): |
|
396 | elif line.startswith('# Node ID '): | |
397 | node = revlog.bin(line[10:]) |
|
397 | node = revlog.bin(line[10:]) | |
398 | elif line.startswith('# Parent '): |
|
398 | elif line.startswith('# Parent '): | |
399 | parents.append(revlog.bin(line[9:])) |
|
399 | parents.append(revlog.bin(line[9:])) | |
400 | elif not line.startswith('# '): |
|
400 | elif not line.startswith('# '): | |
401 | inmsg = True |
|
401 | inmsg = True | |
402 | message.append(line) |
|
402 | message.append(line) | |
403 | if None in (user, date): |
|
403 | if None in (user, date): | |
404 | raise util.Abort(_("filter corrupted changeset (no user or date)")) |
|
404 | raise util.Abort(_("filter corrupted changeset (no user or date)")) | |
405 | return (node, user, date, '\n'.join(message), parents) |
|
405 | return (node, user, date, '\n'.join(message), parents) | |
406 |
|
406 | |||
407 | def log(self, user, date, message, p1, p2, merge=False): |
|
407 | def log(self, user, date, message, p1, p2, merge=False): | |
408 | '''journal changelog metadata for later recover''' |
|
408 | '''journal changelog metadata for later recover''' | |
409 |
|
409 | |||
410 | if not os.path.isdir(self.path): |
|
410 | if not os.path.isdir(self.path): | |
411 | os.mkdir(self.path) |
|
411 | os.mkdir(self.path) | |
412 | fp = self.opener('journal', 'w') |
|
412 | fp = self.opener('journal', 'w') | |
413 | fp.write('# User %s\n' % user) |
|
413 | fp.write('# User %s\n' % user) | |
414 | fp.write('# Date %s\n' % date) |
|
414 | fp.write('# Date %s\n' % date) | |
415 | fp.write('# Node ID %s\n' % revlog.hex(p2)) |
|
415 | fp.write('# Node ID %s\n' % revlog.hex(p2)) | |
416 | fp.write('# Parent ' + revlog.hex(p1) + '\n') |
|
416 | fp.write('# Parent ' + revlog.hex(p1) + '\n') | |
417 | if merge: |
|
417 | if merge: | |
418 | fp.write('# Parent ' + revlog.hex(p2) + '\n') |
|
418 | fp.write('# Parent ' + revlog.hex(p2) + '\n') | |
419 | fp.write(message.rstrip() + '\n') |
|
419 | fp.write(message.rstrip() + '\n') | |
420 | fp.close() |
|
420 | fp.close() | |
421 |
|
421 | |||
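For reference, the journal written by log() above (and parsed back by parselog()) has this shape; the values are placeholders, not data from the patch:

    # User Example User <user@example.com>
    # Date 1361161200 0
    # Node ID <40 hex digits of the changeset being transplanted>
    # Parent <40 hex digits of the working directory parent>
    the original commit message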
422 | def readlog(self): |
|
422 | def readlog(self): | |
423 | return self.parselog(self.opener('journal')) |
|
423 | return self.parselog(self.opener('journal')) | |
424 |
|
424 | |||
425 | def unlog(self): |
|
425 | def unlog(self): | |
426 | '''remove changelog journal''' |
|
426 | '''remove changelog journal''' | |
427 | absdst = os.path.join(self.path, 'journal') |
|
427 | absdst = os.path.join(self.path, 'journal') | |
428 | if os.path.exists(absdst): |
|
428 | if os.path.exists(absdst): | |
429 | os.unlink(absdst) |
|
429 | os.unlink(absdst) | |
430 |
|
430 | |||
431 | def transplantfilter(self, repo, source, root): |
|
431 | def transplantfilter(self, repo, source, root): | |
432 | def matchfn(node): |
|
432 | def matchfn(node): | |
433 | if self.applied(repo, node, root): |
|
433 | if self.applied(repo, node, root): | |
434 | return False |
|
434 | return False | |
435 | if source.changelog.parents(node)[1] != revlog.nullid: |
|
435 | if source.changelog.parents(node)[1] != revlog.nullid: | |
436 | return False |
|
436 | return False | |
437 | extra = source.changelog.read(node)[5] |
|
437 | extra = source.changelog.read(node)[5] | |
438 | cnode = extra.get('transplant_source') |
|
438 | cnode = extra.get('transplant_source') | |
439 | if cnode and self.applied(repo, cnode, root): |
|
439 | if cnode and self.applied(repo, cnode, root): | |
440 | return False |
|
440 | return False | |
441 | return True |
|
441 | return True | |
442 |
|
442 | |||
443 | return matchfn |
|
443 | return matchfn | |
444 |
|
444 | |||
445 | def hasnode(repo, node): |
|
445 | def hasnode(repo, node): | |
446 | try: |
|
446 | try: | |
447 | return repo.changelog.rev(node) is not None |
|
447 | return repo.changelog.rev(node) is not None | |
448 | except error.RevlogError: |
|
448 | except error.RevlogError: | |
449 | return False |
|
449 | return False | |
450 |
|
450 | |||
451 | def browserevs(ui, repo, nodes, opts): |
|
451 | def browserevs(ui, repo, nodes, opts): | |
452 | '''interactively transplant changesets''' |
|
452 | '''interactively transplant changesets''' | |
453 | def browsehelp(ui): |
|
453 | def browsehelp(ui): | |
454 | ui.write(_('y: transplant this changeset\n' |
|
454 | ui.write(_('y: transplant this changeset\n' | |
455 | 'n: skip this changeset\n' |
|
455 | 'n: skip this changeset\n' | |
456 | 'm: merge at this changeset\n' |
|
456 | 'm: merge at this changeset\n' | |
457 | 'p: show patch\n' |
|
457 | 'p: show patch\n' | |
458 | 'c: commit selected changesets\n' |
|
458 | 'c: commit selected changesets\n' | |
459 | 'q: cancel transplant\n' |
|
459 | 'q: cancel transplant\n' | |
460 | '?: show this help\n')) |
|
460 | '?: show this help\n')) | |
461 |
|
461 | |||
462 | displayer = cmdutil.show_changeset(ui, repo, opts) |
|
462 | displayer = cmdutil.show_changeset(ui, repo, opts) | |
463 | transplants = [] |
|
463 | transplants = [] | |
464 | merges = [] |
|
464 | merges = [] | |
465 | for node in nodes: |
|
465 | for node in nodes: | |
466 | displayer.show(repo[node]) |
|
466 | displayer.show(repo[node]) | |
467 | action = None |
|
467 | action = None | |
468 | while not action: |
|
468 | while not action: | |
469 | action = ui.prompt(_('apply changeset? [ynmpcq?]:')) |
|
469 | action = ui.prompt(_('apply changeset? [ynmpcq?]:')) | |
470 | if action == '?': |
|
470 | if action == '?': | |
471 | browsehelp(ui) |
|
471 | browsehelp(ui) | |
472 | action = None |
|
472 | action = None | |
473 | elif action == 'p': |
|
473 | elif action == 'p': | |
474 | parent = repo.changelog.parents(node)[0] |
|
474 | parent = repo.changelog.parents(node)[0] | |
475 | for chunk in patch.diff(repo, parent, node): |
|
475 | for chunk in patch.diff(repo, parent, node): | |
476 | ui.write(chunk) |
|
476 | ui.write(chunk) | |
477 | action = None |
|
477 | action = None | |
478 | elif action not in ('y', 'n', 'm', 'c', 'q'): |
|
478 | elif action not in ('y', 'n', 'm', 'c', 'q'): | |
479 | ui.write(_('no such option\n')) |
|
479 | ui.write(_('no such option\n')) | |
480 | action = None |
|
480 | action = None | |
481 | if action == 'y': |
|
481 | if action == 'y': | |
482 | transplants.append(node) |
|
482 | transplants.append(node) | |
483 | elif action == 'm': |
|
483 | elif action == 'm': | |
484 | merges.append(node) |
|
484 | merges.append(node) | |
485 | elif action == 'c': |
|
485 | elif action == 'c': | |
486 | break |
|
486 | break | |
487 | elif action == 'q': |
|
487 | elif action == 'q': | |
488 | transplants = () |
|
488 | transplants = () | |
489 | merges = () |
|
489 | merges = () | |
490 | break |
|
490 | break | |
491 | displayer.close() |
|
491 | displayer.close() | |
492 | return (transplants, merges) |
|
492 | return (transplants, merges) | |
493 |
|
493 | |||
494 | @command('transplant', |
|
494 | @command('transplant', | |
495 | [('s', 'source', '', _('pull patches from REPO'), _('REPO')), |
|
495 | [('s', 'source', '', _('pull patches from REPO'), _('REPO')), | |
496 | ('b', 'branch', [], |
|
496 | ('b', 'branch', [], | |
497 | _('pull patches from branch BRANCH'), _('BRANCH')), |
|
497 | _('pull patches from branch BRANCH'), _('BRANCH')), | |
498 | ('a', 'all', None, _('pull all changesets up to BRANCH')), |
|
498 | ('a', 'all', None, _('pull all changesets up to BRANCH')), | |
499 | ('p', 'prune', [], _('skip over REV'), _('REV')), |
|
499 | ('p', 'prune', [], _('skip over REV'), _('REV')), | |
500 | ('m', 'merge', [], _('merge at REV'), _('REV')), |
|
500 | ('m', 'merge', [], _('merge at REV'), _('REV')), | |
501 | ('', 'parent', '', |
|
501 | ('', 'parent', '', | |
502 | _('parent to choose when transplanting merge'), _('REV')), |
|
502 | _('parent to choose when transplanting merge'), _('REV')), | |
503 | ('e', 'edit', False, _('invoke editor on commit messages')), |
|
503 | ('e', 'edit', False, _('invoke editor on commit messages')), | |
504 | ('', 'log', None, _('append transplant info to log message')), |
|
504 | ('', 'log', None, _('append transplant info to log message')), | |
505 | ('c', 'continue', None, _('continue last transplant session ' |
|
505 | ('c', 'continue', None, _('continue last transplant session ' | |
506 | 'after repair')), |
|
506 | 'after repair')), | |
507 | ('', 'filter', '', |
|
507 | ('', 'filter', '', | |
508 | _('filter changesets through command'), _('CMD'))], |
|
508 | _('filter changesets through command'), _('CMD'))], | |
509 | _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] ' |
|
509 | _('hg transplant [-s REPO] [-b BRANCH [-a]] [-p REV] ' | |
510 | '[-m REV] [REV]...')) |
|
510 | '[-m REV] [REV]...')) | |
511 | def transplant(ui, repo, *revs, **opts): |
|
511 | def transplant(ui, repo, *revs, **opts): | |
512 | '''transplant changesets from another branch |
|
512 | '''transplant changesets from another branch | |
513 |
|
513 | |||
514 | Selected changesets will be applied on top of the current working |
|
514 | Selected changesets will be applied on top of the current working | |
515 | directory with the log of the original changeset. The changesets |
|
515 | directory with the log of the original changeset. The changesets | |
516 | are copied and will thus appear twice in the history. Use the |
|
516 | are copied and will thus appear twice in the history. Use the | |
517 | rebase extension instead if you want to move a whole branch of |
|
517 | rebase extension instead if you want to move a whole branch of | |
518 | unpublished changesets. |
|
518 | unpublished changesets. | |
519 |
|
519 | |||
520 | If --log is specified, log messages will have a comment appended |
|
520 | If --log is specified, log messages will have a comment appended | |
521 | of the form:: |
|
521 | of the form:: | |
522 |
|
522 | |||
523 | (transplanted from CHANGESETHASH) |
|
523 | (transplanted from CHANGESETHASH) | |
524 |
|
524 | |||
525 | You can rewrite the changelog message with the --filter option. |
|
525 | You can rewrite the changelog message with the --filter option. | |
526 | Its argument will be invoked with the current changelog message as |
|
526 | Its argument will be invoked with the current changelog message as | |
527 | $1 and the patch as $2. |
|
527 | $1 and the patch as $2. | |
528 |
|
528 | |||
529 | If --source/-s is specified, selects changesets from the named |
|
529 | If --source/-s is specified, selects changesets from the named | |
530 | repository. If --branch/-b is specified, selects changesets from |
|
530 | repository. If --branch/-b is specified, selects changesets from | |
531 | the branch holding the named revision, up to that revision. If |
|
531 | the branch holding the named revision, up to that revision. If | |
532 | --all/-a is specified, all changesets on the branch will be |
|
532 | --all/-a is specified, all changesets on the branch will be | |
533 | transplanted, otherwise you will be prompted to select the |
|
533 | transplanted, otherwise you will be prompted to select the | |
534 | changesets you want. |
|
534 | changesets you want. | |
535 |
|
535 | |||
536 | :hg:`transplant --branch REV --all` will transplant the |
|
536 | :hg:`transplant --branch REV --all` will transplant the | |
537 | selected branch (up to the named revision) onto your current |
|
537 | selected branch (up to the named revision) onto your current | |
538 | working directory. |
|
538 | working directory. | |
539 |
|
539 | |||
540 | You can optionally mark selected transplanted changesets as merge |
|
540 | You can optionally mark selected transplanted changesets as merge | |
541 | changesets. You will not be prompted to transplant any ancestors |
|
541 | changesets. You will not be prompted to transplant any ancestors | |
542 | of a merged transplant, and you can merge descendants of them |
|
542 | of a merged transplant, and you can merge descendants of them | |
543 | normally instead of transplanting them. |
|
543 | normally instead of transplanting them. | |
544 |
|
544 | |||
545 | Merge changesets may be transplanted directly by specifying the |
|
545 | Merge changesets may be transplanted directly by specifying the | |
546 | proper parent changeset by calling :hg:`transplant --parent`. |
|
546 | proper parent changeset by calling :hg:`transplant --parent`. | |
547 |
|
547 | |||
548 | If no merges or revisions are provided, :hg:`transplant` will |
|
548 | If no merges or revisions are provided, :hg:`transplant` will | |
549 | start an interactive changeset browser. |
|
549 | start an interactive changeset browser. | |
550 |
|
550 | |||
551 | If a changeset application fails, you can fix the merge by hand |
|
551 | If a changeset application fails, you can fix the merge by hand | |
552 | and then resume where you left off by calling :hg:`transplant |
|
552 | and then resume where you left off by calling :hg:`transplant | |
553 | --continue/-c`. |
|
553 | --continue/-c`. | |
554 | ''' |
|
554 | ''' | |
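A typical invocation might look like the following usage sketch (illustrative only: the source path and revision numbers are made up, and the extension has to be enabled first):

    [extensions]
    transplant =

    $ hg transplant --source ../other-repo 123 125:127
    ... fix up any rejected hunks by hand ...
    $ hg transplant --continue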
555 | def incwalk(repo, csets, match=util.always): |
|
555 | def incwalk(repo, csets, match=util.always): | |
556 | for node in csets: |
|
556 | for node in csets: | |
557 | if match(node): |
|
557 | if match(node): | |
558 | yield node |
|
558 | yield node | |
559 |
|
559 | |||
560 | def transplantwalk(repo, root, branches, match=util.always): |
|
560 | def transplantwalk(repo, root, branches, match=util.always): | |
561 | if not branches: |
|
561 | if not branches: | |
562 | branches = repo.heads() |
|
562 | branches = repo.heads() | |
563 | ancestors = [] |
|
563 | ancestors = [] | |
564 | for branch in branches: |
|
564 | for branch in branches: | |
565 | ancestors.append(repo.changelog.ancestor(root, branch)) |
|
565 | ancestors.append(repo.changelog.ancestor(root, branch)) | |
566 | for node in repo.changelog.nodesbetween(ancestors, branches)[0]: |
|
566 | for node in repo.changelog.nodesbetween(ancestors, branches)[0]: | |
567 | if match(node): |
|
567 | if match(node): | |
568 | yield node |
|
568 | yield node | |
569 |
|
569 | |||
570 | def checkopts(opts, revs): |
|
570 | def checkopts(opts, revs): | |
571 | if opts.get('continue'): |
|
571 | if opts.get('continue'): | |
572 | if opts.get('branch') or opts.get('all') or opts.get('merge'): |
|
572 | if opts.get('branch') or opts.get('all') or opts.get('merge'): | |
573 | raise util.Abort(_('--continue is incompatible with ' |
|
573 | raise util.Abort(_('--continue is incompatible with ' | |
574 | 'branch, all or merge')) |
|
574 | 'branch, all or merge')) | |
575 | return |
|
575 | return | |
576 | if not (opts.get('source') or revs or |
|
576 | if not (opts.get('source') or revs or | |
577 | opts.get('merge') or opts.get('branch')): |
|
577 | opts.get('merge') or opts.get('branch')): | |
578 | raise util.Abort(_('no source URL, branch tag or revision ' |
|
578 | raise util.Abort(_('no source URL, branch tag or revision ' | |
579 | 'list provided')) |
|
579 | 'list provided')) | |
580 | if opts.get('all'): |
|
580 | if opts.get('all'): | |
581 | if not opts.get('branch'): |
|
581 | if not opts.get('branch'): | |
582 | raise util.Abort(_('--all requires a branch revision')) |
|
582 | raise util.Abort(_('--all requires a branch revision')) | |
583 | if revs: |
|
583 | if revs: | |
584 | raise util.Abort(_('--all is incompatible with a ' |
|
584 | raise util.Abort(_('--all is incompatible with a ' | |
585 | 'revision list')) |
|
585 | 'revision list')) | |
586 |
|
586 | |||
587 | checkopts(opts, revs) |
|
587 | checkopts(opts, revs) | |
588 |
|
588 | |||
589 | if not opts.get('log'): |
|
589 | if not opts.get('log'): | |
590 | opts['log'] = ui.config('transplant', 'log') |
|
590 | opts['log'] = ui.config('transplant', 'log') | |
591 | if not opts.get('filter'): |
|
591 | if not opts.get('filter'): | |
592 | opts['filter'] = ui.config('transplant', 'filter') |
|
592 | opts['filter'] = ui.config('transplant', 'filter') | |
593 |
|
593 | |||
594 | tp = transplanter(ui, repo) |
|
594 | tp = transplanter(ui, repo) | |
595 | if opts.get('edit'): |
|
595 | if opts.get('edit'): | |
596 | tp.editor = cmdutil.commitforceeditor |
|
596 | tp.editor = cmdutil.commitforceeditor | |
597 |
|
597 | |||
598 | p1, p2 = repo.dirstate.parents() |
|
598 | p1, p2 = repo.dirstate.parents() | |
599 | if len(repo) > 0 and p1 == revlog.nullid: |
|
599 | if len(repo) > 0 and p1 == revlog.nullid: | |
600 | raise util.Abort(_('no revision checked out')) |
|
600 | raise util.Abort(_('no revision checked out')) | |
601 | if not opts.get('continue'): |
|
601 | if not opts.get('continue'): | |
602 | if p2 != revlog.nullid: |
|
602 | if p2 != revlog.nullid: | |
603 | raise util.Abort(_('outstanding uncommitted merges')) |
|
603 | raise util.Abort(_('outstanding uncommitted merges')) | |
604 | m, a, r, d = repo.status()[:4] |
|
604 | m, a, r, d = repo.status()[:4] | |
605 | if m or a or r or d: |
|
605 | if m or a or r or d: | |
606 | raise util.Abort(_('outstanding local changes')) |
|
606 | raise util.Abort(_('outstanding local changes')) | |
607 |
|
607 | |||
608 | sourcerepo = opts.get('source') |
|
608 | sourcerepo = opts.get('source') | |
609 | if sourcerepo: |
|
609 | if sourcerepo: | |
610 | peer = hg.peer(repo, opts, ui.expandpath(sourcerepo)) |
|
610 | peer = hg.peer(repo, opts, ui.expandpath(sourcerepo)) | |
611 | branches = map(peer.lookup, opts.get('branch', ())) |
|
611 | branches = map(peer.lookup, opts.get('branch', ())) | |
612 | source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer, |
|
612 | source, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer, | |
613 | onlyheads=branches, force=True) |
|
613 | onlyheads=branches, force=True) | |
614 | else: |
|
614 | else: | |
615 | source = repo |
|
615 | source = repo | |
616 | branches = map(source.lookup, opts.get('branch', ())) |
|
616 | branches = map(source.lookup, opts.get('branch', ())) | |
617 | cleanupfn = None |
|
617 | cleanupfn = None | |
618 |
|
618 | |||
619 | try: |
|
619 | try: | |
620 | if opts.get('continue'): |
|
620 | if opts.get('continue'): | |
621 | tp.resume(repo, source, opts) |
|
621 | tp.resume(repo, source, opts) | |
622 | return |
|
622 | return | |
623 |
|
623 | |||
624 | tf = tp.transplantfilter(repo, source, p1) |
|
624 | tf = tp.transplantfilter(repo, source, p1) | |
625 | if opts.get('prune'): |
|
625 | if opts.get('prune'): | |
626 | prune = [source.lookup(r) |
|
626 | prune = [source.lookup(r) | |
627 | for r in scmutil.revrange(source, opts.get('prune'))] |
|
627 | for r in scmutil.revrange(source, opts.get('prune'))] | |
628 | matchfn = lambda x: tf(x) and x not in prune |
|
628 | matchfn = lambda x: tf(x) and x not in prune | |
629 | else: |
|
629 | else: | |
630 | matchfn = tf |
|
630 | matchfn = tf | |
631 | merges = map(source.lookup, opts.get('merge', ())) |
|
631 | merges = map(source.lookup, opts.get('merge', ())) | |
632 | revmap = {} |
|
632 | revmap = {} | |
633 | if revs: |
|
633 | if revs: | |
634 | for r in scmutil.revrange(source, revs): |
|
634 | for r in scmutil.revrange(source, revs): | |
635 | revmap[int(r)] = source.lookup(r) |
|
635 | revmap[int(r)] = source.lookup(r) | |
636 | elif opts.get('all') or not merges: |
|
636 | elif opts.get('all') or not merges: | |
637 | if source != repo: |
|
637 | if source != repo: | |
638 | alltransplants = incwalk(source, csets, match=matchfn) |
|
638 | alltransplants = incwalk(source, csets, match=matchfn) | |
639 | else: |
|
639 | else: | |
640 | alltransplants = transplantwalk(source, p1, branches, |
|
640 | alltransplants = transplantwalk(source, p1, branches, | |
641 | match=matchfn) |
|
641 | match=matchfn) | |
642 | if opts.get('all'): |
|
642 | if opts.get('all'): | |
643 | revs = alltransplants |
|
643 | revs = alltransplants | |
644 | else: |
|
644 | else: | |
645 | revs, newmerges = browserevs(ui, source, alltransplants, opts) |
|
645 | revs, newmerges = browserevs(ui, source, alltransplants, opts) | |
646 | merges.extend(newmerges) |
|
646 | merges.extend(newmerges) | |
647 | for r in revs: |
|
647 | for r in revs: | |
648 | revmap[source.changelog.rev(r)] = r |
|
648 | revmap[source.changelog.rev(r)] = r | |
649 | for r in merges: |
|
649 | for r in merges: | |
650 | revmap[source.changelog.rev(r)] = r |
|
650 | revmap[source.changelog.rev(r)] = r | |
651 |
|
651 | |||
652 | tp.apply(repo, source, revmap, merges, opts) |
|
652 | tp.apply(repo, source, revmap, merges, opts) | |
653 | finally: |
|
653 | finally: | |
654 | if cleanupfn: |
|
654 | if cleanupfn: | |
655 | cleanupfn() |
|
655 | cleanupfn() | |
656 |
|
656 | |||
657 | def revsettransplanted(repo, subset, x): |
|
657 | def revsettransplanted(repo, subset, x): | |
658 | """``transplanted([set])`` |
|
658 | """``transplanted([set])`` | |
659 | Transplanted changesets in set, or all transplanted changesets. |
|
659 | Transplanted changesets in set, or all transplanted changesets. | |
660 | """ |
|
660 | """ | |
661 | if x: |
|
661 | if x: | |
662 | s = revset.getset(repo, subset, x) |
|
662 | s = revset.getset(repo, subset, x) | |
663 | else: |
|
663 | else: | |
664 | s = subset |
|
664 | s = subset | |
665 | return [r for r in s if repo[r].extra().get('transplant_source')] |
|
665 | return [r for r in s if repo[r].extra().get('transplant_source')] | |
666 |
|
666 | |||
667 | def kwtransplanted(repo, ctx, **args): |
|
667 | def kwtransplanted(repo, ctx, **args): | |
668 | """:transplanted: String. The node identifier of the transplanted |
|
668 | """:transplanted: String. The node identifier of the transplanted | |
669 | changeset if any.""" |
|
669 | changeset if any.""" | |
670 | n = ctx.extra().get('transplant_source') |
|
670 | n = ctx.extra().get('transplant_source') | |
671 | return n and revlog.hex(n) or '' |
|
671 | return n and revlog.hex(n) or '' | |
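With the extension enabled, the two hooks above allow, for example (usage sketch, the template text is only an illustration):

    $ hg log -r "transplanted()" --template "{node|short} {transplanted}\n"

which lists each transplanted changeset next to the source hash recorded in its 'transplant_source' extra field.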
672 |
|
672 | |||
673 | def extsetup(ui): |
|
673 | def extsetup(ui): | |
674 | revset.symbols['transplanted'] = revsettransplanted |
|
674 | revset.symbols['transplanted'] = revsettransplanted | |
675 | templatekw.keywords['transplanted'] = kwtransplanted |
|
675 | templatekw.keywords['transplanted'] = kwtransplanted | |
676 |
|
676 | |||
677 | # tell hggettext to extract docstrings from these functions: |
|
677 | # tell hggettext to extract docstrings from these functions: | |
678 | i18nfunctions = [revsettransplanted, kwtransplanted] |
|
678 | i18nfunctions = [revsettransplanted, kwtransplanted] |
@@ -1,219 +1,218 | |||||
1 | # repoview.py - Filtered view of a localrepo object |
|
1 | # repoview.py - Filtered view of a localrepo object | |
2 | # |
|
2 | # | |
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | |
4 | # Logilab SA <contact@logilab.fr> |
|
4 | # Logilab SA <contact@logilab.fr> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | import copy |
|
9 | import copy | |
10 | import phases |
|
10 | import phases | |
11 | import util |
|
11 | import util | |
12 | import obsolete, revset |
|
12 | import obsolete, revset | |
13 |
|
13 | |||
14 |
|
14 | |||
15 | def hideablerevs(repo): |
|
15 | def hideablerevs(repo): | |
16 | """Revisions candidates to be hidden |
|
16 | """Revisions candidates to be hidden | |
17 |
|
17 | |||
18 | This is a standalone function to help extensions to wrap it.""" |
|
18 | This is a standalone function to help extensions to wrap it.""" | |
19 | return obsolete.getrevs(repo, 'obsolete') |
|
19 | return obsolete.getrevs(repo, 'obsolete') | |
20 |
|
20 | |||
21 | def computehidden(repo): |
|
21 | def computehidden(repo): | |
22 | """compute the set of hidden revision to filter |
|
22 | """compute the set of hidden revision to filter | |
23 |
|
23 | |||
24 | During most operations, hidden revisions should be filtered.""" |
|
24 | During most operations, hidden revisions should be filtered.""" | |
25 | assert not repo.changelog.filteredrevs |
|
25 | assert not repo.changelog.filteredrevs | |
26 | hideable = hideablerevs(repo) |
|
26 | hideable = hideablerevs(repo) | |
27 | if hideable: |
|
27 | if hideable: | |
28 | cl = repo.changelog |
|
28 | cl = repo.changelog | |
29 | firsthideable = min(hideable) |
|
29 | firsthideable = min(hideable) | |
30 | revs = cl.revs(start=firsthideable) |
|
30 | revs = cl.revs(start=firsthideable) | |
31 | blockers = [r for r in revset._children(repo, revs, hideable) |
|
31 | blockers = [r for r in revset._children(repo, revs, hideable) | |
32 | if r not in hideable] |
|
32 | if r not in hideable] | |
33 | for par in repo[None].parents(): |
|
33 | for par in repo[None].parents(): | |
34 | blockers.append(par.rev()) |
|
34 | blockers.append(par.rev()) | |
35 | for bm in repo._bookmarks.values(): |
|
35 | for bm in repo._bookmarks.values(): | |
36 | blockers.append(repo[bm].rev()) |
|
36 | blockers.append(repo[bm].rev()) | |
37 | blocked = cl.ancestors(blockers, inclusive=True) |
|
37 | blocked = cl.ancestors(blockers, inclusive=True) | |
38 | return frozenset(r for r in hideable if r not in blocked) |
|
38 | return frozenset(r for r in hideable if r not in blocked) | |
39 | return frozenset() |
|
39 | return frozenset() | |
40 |
|
40 | |||
41 | def computeunserved(repo): |
|
41 | def computeunserved(repo): | |
42 | """compute the set of revision that should be filtered when used a server |
|
42 | """compute the set of revision that should be filtered when used a server | |
43 |
|
43 | |||
44 | Secret and hidden changeset should not pretend to be here.""" |
|
44 | Secret and hidden changeset should not pretend to be here.""" | |
45 | assert not repo.changelog.filteredrevs |
|
45 | assert not repo.changelog.filteredrevs | |
46 | # fast path in simple case to avoid impact of non optimised code |
|
46 | # fast path in simple case to avoid impact of non optimised code | |
47 | hiddens = filterrevs(repo, 'visible') |
|
47 | hiddens = filterrevs(repo, 'visible') | |
48 | if phases.hassecret(repo): |
|
48 | if phases.hassecret(repo): | |
49 | cl = repo.changelog |
|
49 | cl = repo.changelog | |
50 | secret = phases.secret |
|
50 | secret = phases.secret | |
51 | getphase = repo._phasecache.phase |
|
51 | getphase = repo._phasecache.phase | |
52 | first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) |
|
52 | first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) | |
53 | revs = cl.revs(start=first) |
|
53 | revs = cl.revs(start=first) | |
54 | secrets = set(r for r in revs if getphase(repo, r) >= secret) |
|
54 | secrets = set(r for r in revs if getphase(repo, r) >= secret) | |
55 | return frozenset(hiddens | secrets) |
|
55 | return frozenset(hiddens | secrets) | |
56 | else: |
|
56 | else: | |
57 | return hiddens |
|
57 | return hiddens | |
58 | return frozenset() |
|
|||
59 |
|
58 | |||
60 | def computemutable(repo): |
|
59 | def computemutable(repo): | |
61 | """compute the set of revision that should be filtered when used a server |
|
60 | """compute the set of revision that should be filtered when used a server | |
62 |
|
61 | |||
63 | Secret and hidden changeset should not pretend to be here.""" |
|
62 | Secret and hidden changeset should not pretend to be here.""" | |
64 | assert not repo.changelog.filteredrevs |
|
63 | assert not repo.changelog.filteredrevs | |
65 | # fast check to avoid revset call on huge repo |
|
64 | # fast check to avoid revset call on huge repo | |
66 | if util.any(repo._phasecache.phaseroots[1:]): |
|
65 | if util.any(repo._phasecache.phaseroots[1:]): | |
67 | getphase = repo._phasecache.phase |
|
66 | getphase = repo._phasecache.phase | |
68 | maymutable = filterrevs(repo, 'base') |
|
67 | maymutable = filterrevs(repo, 'base') | |
69 | return frozenset(r for r in maymutable if getphase(repo, r)) |
|
68 | return frozenset(r for r in maymutable if getphase(repo, r)) | |
70 | return frozenset() |
|
69 | return frozenset() | |
71 |
|
70 | |||
72 | def computeimpactable(repo): |
|
71 | def computeimpactable(repo): | |
73 | """Everything impactable by mutable revision |
|
72 | """Everything impactable by mutable revision | |
74 |
|
73 | |||
75 | The immutable filter still has some chance of being invalidated. This will |
|
74 | The immutable filter still has some chance of being invalidated. This will | |
76 | happen when: |
|
75 | happen when: | |
77 |
|
76 | |||
78 | - you garbage collect hidden changeset, |
|
77 | - you garbage collect hidden changeset, | |
79 | - public phase is moved backward, |
|
78 | - public phase is moved backward, | |
80 | - something is changed in the filtering (this could be fixed) |
|
79 | - something is changed in the filtering (this could be fixed) | |
81 |
|
80 | |||
82 | This filters out any mutable changeset and any public changeset that may be |
|
81 | This filters out any mutable changeset and any public changeset that may be | |
83 | impacted by something happening to a mutable revision. |
|
82 | impacted by something happening to a mutable revision. | |
84 |
|
83 | |||
85 | This is achieved by filtered everything with a revision number egal or |
|
84 | This is achieved by filtered everything with a revision number egal or | |
86 | higher than the first mutable changeset is filtered.""" |
|
85 | higher than the first mutable changeset is filtered.""" | |
87 | assert not repo.changelog.filteredrevs |
|
86 | assert not repo.changelog.filteredrevs | |
88 | cl = repo.changelog |
|
87 | cl = repo.changelog | |
89 | firstmutable = len(cl) |
|
88 | firstmutable = len(cl) | |
90 | for roots in repo._phasecache.phaseroots[1:]: |
|
89 | for roots in repo._phasecache.phaseroots[1:]: | |
91 | if roots: |
|
90 | if roots: | |
92 | firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) |
|
91 | firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) | |
93 | # protect from nullrev root |
|
92 | # protect from nullrev root | |
94 | firstmutable = max(0, firstmutable) |
|
93 | firstmutable = max(0, firstmutable) | |
95 | return frozenset(xrange(firstmutable, len(cl))) |
|
94 | return frozenset(xrange(firstmutable, len(cl))) | |
96 |
|
95 | |||
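A toy check of the arithmetic above (the numbers are hypothetical, this is not repository code): with a 10-revision changelog whose first mutable revision is 6, the 'base' filter hides revisions 6 through 9.

    firstmutable, cllen = 6, 10    # hypothetical values for illustration
    assert frozenset(xrange(firstmutable, cllen)) == frozenset([6, 7, 8, 9])
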
# function to compute filtered set
filtertable = {'visible': computehidden,
               'served': computeunserved,
               'immutable': computemutable,
               'base': computeimpactable}
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# the ordering may be partial
subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

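To make the nearest-subset ordering concrete, here is a small sketch (not part of the patch) of how a consumer such as the branchmap code can walk subsettable to find the closest filter level for which it already has data; nearestcached and cachedlevels are hypothetical names used only for this illustration:

    def nearestcached(filtername, cachedlevels):
        # follow the nearest-subset chain until an already cached level is found
        name = filtername
        while name is not None:
            if name in cachedlevels:
                return name
            name = subsettable.get(name)
        return None

    # with only the 'base' level cached, a 'visible' lookup falls back to it
    assert nearestcached('visible', set(['base'])) == 'base'
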
def filterrevs(repo, filtername):
    """returns the set of filtered revisions for this filter name"""
    if filtername not in repo.filteredrevcache:
        func = filtertable[filtername]
        repo.filteredrevcache[filtername] = func(repo.unfiltered())
    return repo.filteredrevcache[filtername]

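A small usage sketch (assuming a normally opened repository whose filteredrevcache dict starts out empty; this is not part of the patch), showing that each filter is computed once and then answered from the cache:

    unfi = repo.unfiltered()
    first = filterrevs(unfi, 'visible')    # computed by computehidden()
    second = filterrevs(unfi, 'visible')   # answered from repo.filteredrevcache
    assert first is second                 # same object, no recomputation
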
class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We can not alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes the scope of the filtering harder to control.

    This object behaves very much like the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets the value of `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view controls which revisions need to be
    filtered. (The fact that the changelog is copied is an implementation
    detail.)

    Unlike attributes, this object intercepts all method calls. This means
    that all methods are run on the `repoview` object with the filtered
    `changelog` property. For this purpose the simple `repoview` class must be
    mixed with the actual class of the repository. This ensures that the
    resulting `repoview` object has the very same methods as the repo object.
    This leads to the property below.

    repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be any
    subclass of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername):
        object.__setattr__(self, '_unfilteredrepo', repo)
        object.__setattr__(self, 'filtername', filtername)
        object.__setattr__(self, '_clcachekey', None)
        object.__setattr__(self, '_clcache', None)

    # not a propertycache on purpose we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changelog

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        revs = filterrevs(unfi, self.filtername)
        cl = self._clcache
        newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs))
        if cl is not None:
            # we need to check curkey too for some obscure reason.
            # MQ tests show a corruption of the underlying repo (in _clcache)
            # without change in the cachekey.
            oldfilter = cl.filteredrevs
            try:
                cl.filteredrevs = ()  # disable filtering for tip
                curkey = (len(cl), cl.tip(), hash(oldfilter))
            finally:
                cl.filteredrevs = oldfilter
            if newkey != self._clcachekey or newkey != curkey:
                cl = None
        # could have been made None by the previous if
        if cl is None:
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, '_clcache', cl)
            object.__setattr__(self, '_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name):
        """Return a filtered version of a repository"""
        if name == self.filtername:
            return self
        return self.unfiltered().filtered(name)

    # every attribute access is forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)

    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called as it also exists on the class. We need
    # explicit forwarding to the main repo here
    @property
    def requirements(self):
        return self._unfilteredrepo.requirements

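The dynamic inheritance described in the class docstring is typically arranged by building a throwaway class that puts repoview in front of the repository's concrete class in the MRO. A minimal sketch of that idea; makefilteredview is an illustrative helper, not something this patch defines:

    def makefilteredview(repo, filtername):
        # mix repoview in first so attribute lookup finds its `changelog`
        # property (and the __getattr__/__setattr__ proxies) before the
        # concrete repository class
        cls = type('filtered' + repo.__class__.__name__,
                   (repoview, repo.__class__), {})
        return cls(repo, filtername)   # only repoview.__init__ runs here

Building one such class per concrete repository type keeps isinstance() checks and any class-level monkeypatching working on the filtered view as well.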