@@ -1,385 +1,385 @@
# narrowchangegroup.py - narrow clone changegroup creation and consumption
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    changegroup,
    error,
    extensions,
    manifest,
    mdiff,
    node,
    revlog,
    util,
)

from . import (
    narrowrepo,
)

def setup():

    def supportedoutgoingversions(orig, repo):
        versions = orig(repo)
        if narrowrepo.REQUIREMENT in repo.requirements:
            versions.discard('01')
            versions.discard('02')
        return versions

    extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
                            supportedoutgoingversions)

    def prune(orig, self, revlog, missing, commonrevs):
        if isinstance(revlog, manifest.manifestrevlog):
            matcher = getattr(self._repo, 'narrowmatch',
                              getattr(self, '_narrow_matcher', None))
            if (matcher is not None and
                not matcher().visitdir(revlog._dir[:-1] or '.')):
                return []
        return orig(self, revlog, missing, commonrevs)

    extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)

    def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
                      source):
        matcher = getattr(self._repo, 'narrowmatch',
                          getattr(self, '_narrow_matcher', None))
        if matcher is not None:
            narrowmatch = matcher()
            changedfiles = [f for f in changedfiles if narrowmatch(f)]
        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        return orig(self, changedfiles, linknodes, commonrevs, source)
    extensions.wrapfunction(
        changegroup.cg1packer, 'generatefiles', generatefiles)

    def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
        n = revlog_.node(rev)
        p1n, p2n = revlog_.node(p1), revlog_.node(p2)
        flags = revlog_.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS
        meta = packer.builddeltaheader(
            n, p1n, p2n, node.nullid, linknode, flags)
        # TODO: try and actually send deltas for ellipsis data blocks
        diffheader = mdiff.trivialdiffheader(len(data))
        l = len(meta) + len(diffheader) + len(data)
        return ''.join((changegroup.chunkheader(l),
                        meta,
                        diffheader,
                        data))

    def close(orig, self):
        getattr(self, 'clrev_to_localrev', {}).clear()
        if getattr(self, 'next_clrev_to_localrev', {}):
            self.clrev_to_localrev = self.next_clrev_to_localrev
            del self.next_clrev_to_localrev
        self.changelog_done = True
        return orig(self)
    extensions.wrapfunction(changegroup.cg1packer, 'close', close)

    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    def _sortgroup(orig, self, revlog, nodelist, lookup):
        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
            return orig(self, revlog, nodelist, lookup)
        key = lambda n: self.clnode_to_rev[lookup(n)]
        return [revlog.rev(n) for n in sorted(nodelist, key=key)]

    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)

    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        # Note: other than delegating to orig, the only deviation in
        # logic from normal hg's generate is marked with BEGIN/END
        # NARROW HACK.
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow bundle
            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
                yield x
            return

        repo = self._repo
        cl = repo.changelog
        mfl = repo.manifestlog
        mfrevlog = mfl._revlog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            # BEGIN NARROW HACK
            #
            # Only update mfs if x is going to be sent. Otherwise we
            # end up with bogus linkrevs specified for manifests and
            # we skip some manifest nodes that we should otherwise
            # have sent.
            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Set this narrow-specific dict so we have the lowest manifest
                # revnum to look up for this cl revnum. (Part of mapping
                # changelog ellipsis parents to manifest ellipsis parents)
                self.next_clrev_to_localrev.setdefault(cl.rev(x),
                                                       mfrevlog.rev(n))
            # We can't trust the changed files list in the changeset if the
            # client requested a shallow clone.
            if self.is_shallow:
                changedfiles.update(mfl[c[0]].read().keys())
            else:
                changedfiles.update(c[3])
            # END NARROW HACK
            # Record a complete list of potentially-changed files in
            # this manifest.
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)
        # Shallow clones also don't work correctly with fastpathlinkrev
        # because file nodes may need to be sent for a manifest even if they
        # weren't introduced by that manifest.
        fastpathlinkrev = fastpathlinkrev and not self.is_shallow

        moreargs = []
        if self.generatemanifests.func_code.co_argcount == 7:
            # The source argument was added to generatemanifests in hg in
            # 75cc1f1e11f2 (2017/09/11).
            moreargs.append(source)
        for chunk in self.generatemanifests(commonrevs, clrevorder,
                fastpathlinkrev, mfs, fnodes, *moreargs):
            yield chunk
        # BEGIN NARROW HACK
        mfdicts = None
        if self.is_shallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in mfs.iteritems()]
        # END NARROW HACK
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        # BEGIN NARROW HACK
        #
        # We need to pass the mfdicts variable down into
        # generatefiles(), but more than one command might have
        # wrapped generatefiles so we can't modify the function
        # signature. Instead, we pass the data to ourselves using an
        # instance attribute. I'm sorry.
        self._mfdicts = mfdicts
        # END NARROW HACK
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)

    def revchunk(orig, self, revlog, rev, prev, linknode):
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow changegroup
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev
        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return
        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == node.nullrev:
                return node.nullrev
            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = revlog.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev
            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != node.nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != node.nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in xrange(rev, 0, -1):
                        if revlog.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
            return node.nullrev

        if not linkparents or (
            revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
            p1, p2 = node.nullrev, node.nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = node.nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)
        yield ellipsisdata(
            self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)

    def deltaparent(orig, self, revlog, rev, p1, p2, prev):
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1
        return orig(self, revlog, rev, p1, p2, prev)
    extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
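
Every hook in narrowchangegroup.py relies on the same extensions.wrapfunction contract: the replacement receives the previously installed function as its first argument (orig) and decides whether to delegate. A minimal sketch of that pattern, separate from this change; the wrapper name, the debug message, and the uisetup placement are illustrative assumptions, not part of the code under review:

from mercurial import changegroup, extensions

def _countingprune(orig, self, rl, missing, commonrevs):
    # Delegate to the wrapped implementation, then observe its result.
    kept = orig(self, rl, missing, commonrevs)
    self._repo.ui.debug('prune kept %d revisions\n' % len(kept))
    return kept

def uisetup(ui):
    # Wrappers stack in registration order: a later wrapfunction call on
    # the same method receives this wrapper as its orig.
    extensions.wrapfunction(changegroup.cg1packer, 'prune', _countingprune)

Because wrappers stack this way and other extensions may have patched the same packer, the code above probes attributes such as is_shallow and _narrow_matcher with getattr instead of assuming they exist.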
@@ -1,35 +1,35 @@
# narrowcopies.py - extensions to mercurial copies module to support narrow
# clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    copies,
    extensions,
    util,
)

def setup(repo):
    def _computeforwardmissing(orig, a, b, match=None):
        missing = orig(a, b, match)
        if util.safehasattr(repo, 'narrowmatch'):
            narrowmatch = repo.narrowmatch()
            missing = [f for f in missing if narrowmatch(f)]
        return missing

    def _checkcopies(orig, srcctx, dstctx, f, base, tca, remotebase, limit,
                     data):
        if util.safehasattr(repo, 'narrowmatch'):
            narrowmatch = repo.narrowmatch()
            if not narrowmatch(f):
                return
        orig(srcctx, dstctx, f, base, tca, remotebase, limit, data)

    extensions.wrapfunction(copies, '_computeforwardmissing',
                            _computeforwardmissing)
    extensions.wrapfunction(copies, '_checkcopies', _checkcopies)
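
setup() here takes the repo because both wrappers close over it to reach repo.narrowmatch(). How setup() gets invoked is not shown in this change; presumably the extension's reposetup hook calls it once per local repository. A sketch of that wiring under that assumption; the module layout in the import is hypothetical:

# Hypothetical extension entry point; the real __init__.py is not part of
# this change and may differ.
from . import narrowcopies, narrowpatch

def reposetup(ui, repo):
    if not repo.local():
        # The wrappers need a local repo that can expose narrowmatch().
        return
    narrowcopies.setup(repo)
    narrowpatch.setup(repo)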
@@ -1,42 +1,42 @@
# narrowpatch.py - extensions to mercurial patch module to support narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    extensions,
    patch,
    util,
)

def setup(repo):
    def _filepairs(orig, *args):
        """Only includes files within the narrow spec in the diff."""
        if util.safehasattr(repo, 'narrowmatch'):
            narrowmatch = repo.narrowmatch()
            for x in orig(*args):
                f1, f2, copyop = x
                if ((not f1 or narrowmatch(f1)) and
                    (not f2 or narrowmatch(f2))):
                    yield x
        else:
            for x in orig(*args):
                yield x

    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
                copy, getfilectx, *args, **kwargs):
        if util.safehasattr(repo, 'narrowmatch'):
            narrowmatch = repo.narrowmatch()
            modified = [f for f in modified if narrowmatch(f)]
            added = [f for f in added if narrowmatch(f)]
            removed = [f for f in removed if narrowmatch(f)]
            copy = {k: v for k, v in copy.iteritems() if narrowmatch(k)}
        return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
                    getfilectx, *args, **kwargs)

    extensions.wrapfunction(patch, '_filepairs', _filepairs)
    extensions.wrapfunction(patch, 'trydiff', trydiff)
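
All of these filters treat repo.narrowmatch() as an ordinary Mercurial matcher, so narrowing a file list is a plain membership test and narrowing a directory walk uses visitdir(), as in prune() above. A standalone sketch of that behaviour built directly on mercurial.match; the repository root and the 'path:src' include pattern are made-up examples, not taken from this change:

from mercurial import match as matchmod

# Roughly the kind of matcher narrowmatch() would yield for a clone
# narrowed to the src/ directory.
narrowmatch = matchmod.match('/repo', '', [], include=['path:src'])

files = ['src/main.py', 'docs/guide.txt', 'src/util/helpers.py']
print([f for f in files if narrowmatch(f)])  # only the src/ entries survive
print(narrowmatch.visitdir('docs'))          # False, so a walk can skip it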