@@ -1,385 +1,380 @@
 # narrowchangegroup.py - narrow clone changegroup creation and consumption
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 from mercurial.i18n import _
 from mercurial import (
     changegroup,
     error,
     extensions,
     manifest,
     mdiff,
     node,
     revlog,
     util,
 )
 
 from . import (
     narrowrepo,
 )
 
 def setup():
 
     def supportedoutgoingversions(orig, repo):
         versions = orig(repo)
         if narrowrepo.REQUIREMENT in repo.requirements:
             versions.discard('01')
             versions.discard('02')
         return versions
 
     extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
                             supportedoutgoingversions)
 
     def prune(orig, self, revlog, missing, commonrevs):
         if isinstance(revlog, manifest.manifestrevlog):
             matcher = getattr(self._repo, 'narrowmatch',
                               getattr(self, '_narrow_matcher', None))
             if (matcher is not None and
                 not matcher().visitdir(revlog._dir[:-1] or '.')):
                 return []
         return orig(self, revlog, missing, commonrevs)
 
     extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
 
     def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
                       source):
         matcher = getattr(self._repo, 'narrowmatch',
                           getattr(self, '_narrow_matcher', None))
         if matcher is not None:
             narrowmatch = matcher()
             changedfiles = [f for f in changedfiles if narrowmatch(f)]
         if getattr(self, 'is_shallow', False):
             # See comment in generate() for why this sadness is a thing.
             mfdicts = self._mfdicts
             del self._mfdicts
             # In a shallow clone, the linknodes callback needs to also include
             # those file nodes that are in the manifests we sent but weren't
             # introduced by those manifests.
             commonctxs = [self._repo[c] for c in commonrevs]
             oldlinknodes = linknodes
             clrev = self._repo.changelog.rev
             def linknodes(flog, fname):
                 for c in commonctxs:
                     try:
                         fnode = c.filenode(fname)
                         self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                     except error.ManifestLookupError:
                         pass
                 links = oldlinknodes(flog, fname)
                 if len(links) != len(mfdicts):
                     for mf, lr in mfdicts:
                         fnode = mf.get(fname, None)
                         if fnode in links:
                             links[fnode] = min(links[fnode], lr, key=clrev)
                         elif fnode:
                             links[fnode] = lr
                 return links
         return orig(self, changedfiles, linknodes, commonrevs, source)
     extensions.wrapfunction(
         changegroup.cg1packer, 'generatefiles', generatefiles)
 
     def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
         n = revlog_.node(rev)
         p1n, p2n = revlog_.node(p1), revlog_.node(p2)
         flags = revlog_.flags(rev)
         flags |= revlog.REVIDX_ELLIPSIS
         meta = packer.builddeltaheader(
             n, p1n, p2n, node.nullid, linknode, flags)
         # TODO: try and actually send deltas for ellipsis data blocks
         diffheader = mdiff.trivialdiffheader(len(data))
         l = len(meta) + len(diffheader) + len(data)
         return ''.join((changegroup.chunkheader(l),
                         meta,
                         diffheader,
                         data))
 
     def close(orig, self):
         getattr(self, 'clrev_to_localrev', {}).clear()
         if getattr(self, 'next_clrev_to_localrev', {}):
             self.clrev_to_localrev = self.next_clrev_to_localrev
             del self.next_clrev_to_localrev
         self.changelog_done = True
         return orig(self)
     extensions.wrapfunction(changegroup.cg1packer, 'close', close)
 
     # In a perfect world, we'd generate better ellipsis-ified graphs
     # for non-changelog revlogs. In practice, we haven't started doing
     # that yet, so the resulting DAGs for the manifestlog and filelogs
     # are actually full of bogus parentage on all the ellipsis
     # nodes. This has the side effect that, while the contents are
     # correct, the individual DAGs might be completely out of whack in
     # a case like 882681bc3166 and its ancestors (back about 10
     # revisions or so) in the main hg repo.
     #
     # The one invariant we *know* holds is that the new (potentially
     # bogus) DAG shape will be valid if we order the nodes in the
     # order that they're introduced in dramatis personae by the
     # changelog, so what we do is we sort the non-changelog histories
     # by the order in which they are used by the changelog.
     def _sortgroup(orig, self, revlog, nodelist, lookup):
         if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
             return orig(self, revlog, nodelist, lookup)
         key = lambda n: self.clnode_to_rev[lookup(n)]
         return [revlog.rev(n) for n in sorted(nodelist, key=key)]
 
     extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
 
     def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
         '''yield a sequence of changegroup chunks (strings)'''
         # Note: other than delegating to orig, the only deviation in
         # logic from normal hg's generate is marked with BEGIN/END
         # NARROW HACK.
         if not util.safehasattr(self, 'full_nodes'):
             # not sending a narrow bundle
             for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
                 yield x
             return
 
         repo = self._repo
         cl = repo.changelog
         mfl = repo.manifestlog
         mfrevlog = mfl._revlog
 
         clrevorder = {}
         mfs = {} # needed manifests
         fnodes = {} # needed file nodes
         changedfiles = set()
 
         # Callback for the changelog, used to collect changed files and manifest
         # nodes.
         # Returns the linkrev node (identity in the changelog case).
         def lookupcl(x):
             c = cl.read(x)
             clrevorder[x] = len(clrevorder)
             # BEGIN NARROW HACK
             #
             # Only update mfs if x is going to be sent. Otherwise we
             # end up with bogus linkrevs specified for manifests and
             # we skip some manifest nodes that we should otherwise
             # have sent.
             if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
                 n = c[0]
                 # record the first changeset introducing this manifest version
                 mfs.setdefault(n, x)
                 # Set this narrow-specific dict so we have the lowest manifest
                 # revnum to look up for this cl revnum. (Part of mapping
                 # changelog ellipsis parents to manifest ellipsis parents)
                 self.next_clrev_to_localrev.setdefault(cl.rev(x),
                                                        mfrevlog.rev(n))
                 # We can't trust the changed files list in the changeset if the
                 # client requested a shallow clone.
                 if self.is_shallow:
                     changedfiles.update(mfl[c[0]].read().keys())
                 else:
                     changedfiles.update(c[3])
             # END NARROW HACK
             # Record a complete list of potentially-changed files in
             # this manifest.
             return x
 
         self._verbosenote(_('uncompressed size of bundle content:\n'))
         size = 0
         for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
             size += len(chunk)
             yield chunk
         self._verbosenote(_('%8.i (changelog)\n') % size)
 
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
         # are walked in revlog order.
         #
         # When taking the slowpath with reorder=None and the manifest revlog
         # uses generaldelta, the manifest may be walked in the "wrong" order.
         # Without 'clrevorder', we would get an incorrect linkrev (see fix in
         # cc0ff93d0c0c).
         #
         # When taking the fastpath, we are only vulnerable to reordering
         # of the changelog itself. The changelog never uses generaldelta, so
         # it is only reordered when reorder=True. To handle this case, we
         # simply take the slowpath, which already has the 'clrevorder' logic.
         # This was also fixed in cc0ff93d0c0c.
         fastpathlinkrev = fastpathlinkrev and not self._reorder
         # Treemanifests don't work correctly with fastpathlinkrev
         # either, because we don't discover which directory nodes to
         # send along with files. This could probably be fixed.
         fastpathlinkrev = fastpathlinkrev and (
             'treemanifest' not in repo.requirements)
         # Shallow clones also don't work correctly with fastpathlinkrev
         # because file nodes may need to be sent for a manifest even if they
         # weren't introduced by that manifest.
         fastpathlinkrev = fastpathlinkrev and not self.is_shallow
 
-        moreargs = []
-        if self.generatemanifests.func_code.co_argcount == 7:
-            # The source argument was added to generatemanifests in hg in
-            # 75cc1f1e11f2 (2017/09/11).
-            moreargs.append(source)
         for chunk in self.generatemanifests(commonrevs, clrevorder,
-                                            fastpathlinkrev, mfs, fnodes, *moreargs):
+                                            fastpathlinkrev, mfs, fnodes, source):
             yield chunk
         # BEGIN NARROW HACK
         mfdicts = None
         if self.is_shallow:
             mfdicts = [(self._repo.manifestlog[n].read(), lr)
                        for (n, lr) in mfs.iteritems()]
         # END NARROW HACK
         mfs.clear()
         clrevs = set(cl.rev(x) for x in clnodes)
 
         if not fastpathlinkrev:
             def linknodes(unused, fname):
                 return fnodes.get(fname, {})
         else:
             cln = cl.node
             def linknodes(filerevlog, fname):
                 llr = filerevlog.linkrev
                 fln = filerevlog.node
                 revs = ((r, llr(r)) for r in filerevlog)
                 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
 
         # BEGIN NARROW HACK
         #
         # We need to pass the mfdicts variable down into
         # generatefiles(), but more than one command might have
         # wrapped generatefiles so we can't modify the function
         # signature. Instead, we pass the data to ourselves using an
         # instance attribute. I'm sorry.
         self._mfdicts = mfdicts
         # END NARROW HACK
         for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                         source):
             yield chunk
 
         yield self.close()
 
         if clnodes:
             repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
     extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
 
     def revchunk(orig, self, revlog, rev, prev, linknode):
         if not util.safehasattr(self, 'full_nodes'):
             # not sending a narrow changegroup
             for x in orig(self, revlog, rev, prev, linknode):
                 yield x
             return
         # build up some mapping information that's useful later. See
         # the local() nested function below.
         if not self.changelog_done:
             self.clnode_to_rev[linknode] = rev
             linkrev = rev
             self.clrev_to_localrev[linkrev] = rev
         else:
             linkrev = self.clnode_to_rev[linknode]
             self.clrev_to_localrev[linkrev] = rev
         # This is a node to send in full, because the changeset it
         # corresponds to was a full changeset.
         if linknode in self.full_nodes:
             for x in orig(self, revlog, rev, prev, linknode):
                 yield x
             return
         # At this point, a node can either be one we should skip or an
         # ellipsis. If it's not an ellipsis, bail immediately.
         if linkrev not in self.precomputed_ellipsis:
             return
         linkparents = self.precomputed_ellipsis[linkrev]
         def local(clrev):
             """Turn a changelog revnum into a local revnum.
 
             The ellipsis dag is stored as revnums on the changelog,
             but when we're producing ellipsis entries for
             non-changelog revlogs, we need to turn those numbers into
             something local. This does that for us, and during the
             changelog sending phase will also expand the stored
             mappings as needed.
             """
             if clrev == node.nullrev:
                 return node.nullrev
             if not self.changelog_done:
                 # If we're doing the changelog, it's possible that we
                 # have a parent that is already on the client, and we
                 # need to store some extra mapping information so that
                 # our contained ellipsis nodes will be able to resolve
                 # their parents.
                 if clrev not in self.clrev_to_localrev:
                     clnode = revlog.node(clrev)
                     self.clnode_to_rev[clnode] = clrev
                 return clrev
             # Walk the ellipsis-ized changelog breadth-first looking for a
             # change that has been linked from the current revlog.
             #
             # For a flat manifest revlog only a single step should be necessary
             # as all relevant changelog entries are relevant to the flat
             # manifest.
             #
             # For a filelog or tree manifest dirlog however not every changelog
             # entry will have been relevant, so we need to skip some changelog
             # nodes even after ellipsis-izing.
             walk = [clrev]
             while walk:
                 p = walk[0]
                 walk = walk[1:]
                 if p in self.clrev_to_localrev:
                     return self.clrev_to_localrev[p]
                 elif p in self.full_nodes:
                     walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                  if pp != node.nullrev])
                 elif p in self.precomputed_ellipsis:
                     walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                  if pp != node.nullrev])
                 else:
                     # In this case, we've got an ellipsis with parents
                     # outside the current bundle (likely an
                     # incremental pull). We "know" that we can use the
                     # value of this same revlog at whatever revision
                     # is pointed to by linknode. "Know" is in scare
                     # quotes because I haven't done enough examination
                     # of edge cases to convince myself this is really
                     # a fact - it works for all the (admittedly
                     # thorough) cases in our testsuite, but I would be
                     # somewhat unsurprised to find a case in the wild
                     # where this breaks down a bit. That said, I don't
                     # know if it would hurt anything.
                     for i in xrange(rev, 0, -1):
                         if revlog.linkrev(i) == clrev:
                             return i
                     # We failed to resolve a parent for this node, so
                     # we crash the changegroup construction.
                     raise error.Abort(
                         'unable to resolve parent while packing %r %r'
                         ' for changeset %r' % (revlog.indexfile, rev, clrev))
             return node.nullrev
 
         if not linkparents or (
                 revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
             p1, p2 = node.nullrev, node.nullrev
         elif len(linkparents) == 1:
             p1, = sorted(local(p) for p in linkparents)
             p2 = node.nullrev
         else:
             p1, p2 = sorted(local(p) for p in linkparents)
         yield ellipsisdata(
             self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
     extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
 
     def deltaparent(orig, self, revlog, rev, p1, p2, prev):
         if util.safehasattr(self, 'full_nodes'):
             # TODO: send better deltas when in narrow mode.
             #
             # changegroup.group() loops over revisions to send,
             # including revisions we'll skip. What this means is that
             # `prev` will be a potentially useless delta base for all
             # ellipsis nodes, as the client likely won't have it. In
             # the future we should do bookkeeping about which nodes
             # have been sent to the client, and try to be
             # significantly smarter about delta bases. This is
             # slightly tricky because this same code has to work for
             # all revlogs, and we don't have the linkrev/linknode here.
             return p1
         return orig(self, revlog, rev, p1, p2, prev)
     extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
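For readers less familiar with Mercurial's monkey-patching style, the sketch below (not part of the change above) illustrates the wrap-and-delegate pattern that the wrappers registered in setup() rely on via extensions.wrapfunction: the wrapper receives the original callable as its first argument and either short-circuits or falls through to it. The Packer class and the local wrapfunction() helper here are hypothetical stand-ins so the example runs without Mercurial installed; they are not Mercurial's real API.

# Hypothetical stand-ins: a toy Packer and a local wrapfunction(), used only
# to demonstrate the orig-delegation pattern, not Mercurial's real classes.

class Packer(object):
    def prune(self, revlog, missing, commonrevs):
        # Pretend this computes the revisions worth sending.
        return [r for r in missing if r not in commonrevs]

def wrapfunction(container, funcname, wrapper):
    # Replace container.funcname so that every call becomes wrapper(orig, ...).
    origfn = getattr(container, funcname)
    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)
    setattr(container, funcname, wrapped)

def narrowprune(orig, self, revlog, missing, commonrevs):
    # Narrow-style short-circuit: skip revlogs we do not care about,
    # otherwise delegate to the original implementation.
    if revlog is None:
        return []
    return orig(self, revlog, missing, commonrevs)

wrapfunction(Packer, 'prune', narrowprune)

print(Packer().prune(object(), missing=[1, 2, 3], commonrevs={2}))  # -> [1, 3]

In the extension itself the same shape repeats for prune, generatefiles, close, _sortgroup, generate, revchunk, and deltaparent, with changegroup.cg1packer (or cg2packer) in place of the toy Packer.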