revbranchcache: disable the new part for narrow hg bundle...
Boris Feld
r36985:f62873db default
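The change reuses the wrap-and-replace pattern already applied to the 'changegroup' part in setup(): look up the original part generator in exchange.getbundle2partsmapping, install a wrapper in its place, and have the wrapper emit nothing when the request is a narrow one. A minimal standalone sketch of that pattern follows; the mapping, part name, and 'narrow' request flag mirror the diff, while the helper name and keyword-argument signature are purely illustrative.

# Sketch (not part of the change) of the wrap-and-replace pattern the diff
# applies to the 'cache:rev-branch-cache' part generator.
def disablepartfornarrow(partsmapping, partname):
    origfn = partsmapping[partname]
    def wrapped(bundler, repo, source, **kwargs):
        if kwargs.get('narrow', False):
            return  # narrow request: skip this part entirely
        return origfn(bundler, repo, source, **kwargs)
    partsmapping[partname] = wrapped

# e.g. disablepartfornarrow(exchange.getbundle2partsmapping,
#                           'cache:rev-branch-cache')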
@@ -1,489 +1,502 @@
# narrowbundle2.py - bundle2 extensions for narrow repository support
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import struct

from mercurial.i18n import _
from mercurial.node import (
    bin,
    nullid,
    nullrev,
)
from mercurial import (
    bundle2,
    changegroup,
    dagutil,
    error,
    exchange,
    extensions,
    narrowspec,
    repair,
    util,
    wireproto,
)

NARROWCAP = 'narrow'
_NARROWACL_SECTION = 'narrowhgacl'
_CHANGESPECPART = NARROWCAP + ':changespec'
_SPECPART = NARROWCAP + ':spec'
_SPECPART_INCLUDE = 'include'
_SPECPART_EXCLUDE = 'exclude'
_KILLNODESIGNAL = 'KILL'
_DONESIGNAL = 'DONE'
_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

# When advertising capabilities, always include narrow clone support.
def getrepocaps_narrow(orig, repo, **kwargs):
    caps = orig(repo, **kwargs)
    caps[NARROWCAP] = ['v0']
    return caps

def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    cldag = dagutil.revlogdag(cl)
    # dagutil does not like nullid/nullrev
    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
    headsrevs = cldag.internalizeall(heads)
    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort('Failed to split up ellipsis node! head: %d, '
                          'roots: %d %d %d' % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = cldag.parents(rev)
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = changegroup.getbundler(version, repo)
    # Let the packer have access to the narrow matcher so it can
    # omit filelogs and dirlogs as needed
    packer._narrow_matcher = lambda : match
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)

# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
                                    bundlecaps=None, b2caps=None, heads=None,
                                    common=None, **kwargs):
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    else:
        raise ValueError(_("server does not advertise changegroup version,"
                           " can't negotiate support for ellipsis nodes"))

    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
        outgoing = exchange._computeoutgoing(repo, heads, common)
        if not outgoing.missing:
            return
        def wrappedgetbundler(orig, *args, **kwargs):
            bundler = orig(*args, **kwargs)
            bundler._narrow_matcher = lambda : newmatch
            return bundler
        with extensions.wrappedfunction(changegroup, 'getbundler',
                                        wrappedgetbundler):
            cg = changegroup.makestream(repo, outgoing, version, source)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

        if include or exclude:
            narrowspecpart = bundler.newpart(_SPECPART)
            if include:
                narrowspecpart.addparam(
                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
            if exclude:
                narrowspecpart.addparam(
                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

        return

    depth = kwargs.get(r'depth', None)
    if depth is not None:
        depth = int(depth)
        if depth < 1:
            raise error.Abort(_('depth must be positive, got %d') % depth)

    heads = set(heads or repo.heads())
    common = set(common or [nullid])
    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
    known = {bin(n) for n in kwargs.get(r'known', [])}
    if known and (oldinclude != include or oldexclude != exclude):
        # Steps:
        # 1. Send kill for "$known & ::common"
        #
        # 2. Send changegroup for ::common
        #
        # 3. Proceed.
        #
        # In the future, we can send kills for only the specific
        # nodes we know should go away or change shape, and then
        # send a data stream that tells the client something like this:
        #
        # a) apply this changegroup
        # b) apply nodes XXX, YYY, ZZZ that you already have
        # c) goto a
        #
        # until they've built up the full new state.
        # Convert to revnums and intersect with "common". The client should
        # have made it a subset of "common" already, but let's be safe.
        known = set(repo.revs("%ln & ::%ln", known, common))
        # TODO: we could send only roots() of this set, and the
        # list of nodes in common, and the client could work out
        # what to strip, instead of us explicitly sending every
        # single node.
        deadrevs = known
        def genkills():
            for r in deadrevs:
                yield _KILLNODESIGNAL
                yield repo.changelog.node(r)
            yield _DONESIGNAL
        bundler.newpart(_CHANGESPECPART, data=genkills())
        newvisit, newfull, newellipsis = _computeellipsis(
            repo, set(), common, known, newmatch)
        if newvisit:
            cg = _packellipsischangegroup(
                repo, common, newmatch, newfull, newellipsis,
                newvisit, depth, source, version)
            part = bundler.newpart('changegroup', data=cg)
            part.addparam('version', version)
            if 'treemanifest' in repo.requirements:
                part.addparam('treemanifest', '1')

    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
        repo, common, heads, set(), newmatch, depth=depth)

    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
    if visitnodes:
        cg = _packellipsischangegroup(
            repo, common, newmatch, relevant_nodes, ellipsisroots,
            visitnodes, depth, source, version)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

def applyacl_narrow(repo, kwargs):
    ui = repo.ui
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args

@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
def _handlechangespec_2(op, inpart):
    includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
        op.repo._writerequirements()
    op.repo.setnarrowpats(includepats, excludepats)

@bundle2.parthandler(_CHANGESPECPART)
def _handlechangespec(op, inpart):
    repo = op.repo
    cl = repo.changelog

    # changesets which need to be stripped entirely. either they're no longer
    # needed in the new narrow spec, or the server is sending a replacement
    # in the changegroup part.
    clkills = set()

    # A changespec part contains all the updates to ellipsis nodes
    # that will happen as a result of widening or narrowing a
    # repo. All the changes that this block encounters are ellipsis
    # nodes or flags to kill an existing ellipsis.
    chunksignal = changegroup.readexactly(inpart, 4)
    while chunksignal != _DONESIGNAL:
        if chunksignal == _KILLNODESIGNAL:
            # a node used to be an ellipsis but isn't anymore
            ck = changegroup.readexactly(inpart, 20)
            if cl.hasnode(ck):
                clkills.add(ck)
        else:
            raise error.Abort(
                _('unexpected changespec node chunk type: %s') % chunksignal)
        chunksignal = changegroup.readexactly(inpart, 4)

    if clkills:
        # preserve bookmarks that repair.strip() would otherwise strip
        bmstore = repo._bookmarks
        class dummybmstore(dict):
            def applychanges(self, repo, tr, changes):
                pass
            def recordchange(self, tr): # legacy version
                pass
        repo._bookmarks = dummybmstore()
        chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                 topic='widen')
        repo._bookmarks = bmstore
        if chgrpfile:
            # presence of _widen_bundle attribute activates widen handler later
            op._widen_bundle = chgrpfile
    # Set the new narrowspec if we're widening. The setnewnarrowpats() method
    # will currently always be there when using the core+narrowhg server, but
    # other servers may include a changespec part even when not widening (e.g.
    # because we're deepening a shallow repo).
    if util.safehasattr(repo, 'setnewnarrowpats'):
        repo.setnewnarrowpats()

def handlechangegroup_widen(op, inpart):
    """Changegroup exchange handler which restores temporarily-stripped nodes"""
    # We saved a bundle with stripped node data we must now restore.
    # This approach is based on mercurial/repair.py@6ee26a53c111.
    repo = op.repo
    ui = op.ui

    chgrpfile = op._widen_bundle
    del op._widen_bundle
    vfs = repo.vfs

    ui.note(_("adding branch\n"))
    f = vfs.open(chgrpfile, "rb")
    try:
        gen = exchange.readbundle(ui, f, chgrpfile, vfs)
        if not ui.verbose:
            # silence internal shuffling chatter
            ui.pushbuffer()
        if isinstance(gen, bundle2.unbundle20):
            with repo.transaction('strip') as tr:
                bundle2.processbundle(repo, gen, lambda: tr)
        else:
            gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
        if not ui.verbose:
            ui.popbuffer()
    finally:
        f.close()

    # remove undo files
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                ui.warn(_('error removing %s: %s\n') %
                        (undovfs.join(undofile), util.forcebytestr(e)))

    # Remove partial backup only if there were no exceptions
    vfs.unlink(chgrpfile)

def setup():
    """Enable narrow repo support in bundle2-related extension points."""
    extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)

    wireproto.gboptsmap['narrow'] = 'boolean'
    wireproto.gboptsmap['depth'] = 'plain'
    wireproto.gboptsmap['oldincludepats'] = 'csv'
    wireproto.gboptsmap['oldexcludepats'] = 'csv'
    wireproto.gboptsmap['includepats'] = 'csv'
    wireproto.gboptsmap['excludepats'] = 'csv'
    wireproto.gboptsmap['known'] = 'csv'

    # Extend changegroup serving to handle requests from narrow clients.
    origcgfn = exchange.getbundle2partsmapping['changegroup']
    def wrappedcgfn(*args, **kwargs):
        repo = args[1]
        if repo.ui.has_section(_NARROWACL_SECTION):
            getbundlechangegrouppart_narrow(
                *args, **applyacl_narrow(repo, kwargs))
        elif kwargs.get(r'narrow', False):
            getbundlechangegrouppart_narrow(*args, **kwargs)
        else:
            origcgfn(*args, **kwargs)
    exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

+    # disable rev branch cache exchange when serving a narrow bundle
+    # (currently incompatible with that part)
+    origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
+    def wrappedcgfn(*args, **kwargs):
+        repo = args[1]
+        if repo.ui.has_section(_NARROWACL_SECTION):
+            return
+        elif kwargs.get(r'narrow', False):
+            return
+        else:
+            origrbcfn(*args, **kwargs)
+    exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
+
    # Extend changegroup receiver so client can fixup after widen requests.
    origcghandler = bundle2.parthandlermapping['changegroup']
    def wrappedcghandler(op, inpart):
        origcghandler(op, inpart)
        if util.safehasattr(op, '_widen_bundle'):
            handlechangegroup_widen(op, inpart)
    wrappedcghandler.params = origcghandler.params
    bundle2.parthandlermapping['changegroup'] = wrappedcghandler
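For context, applyacl_narrow() above resolves per-user patterns from the narrowhgacl config section, falling back to default.includes/default.excludes, and maps a bare '*' to 'path:.'. A plausible server-side hgrc along those lines (user names and paths are invented for illustration, not taken from this change):

[narrowhgacl]
default.includes = *
alice.includes = src/component1, docs
alice.excludes = docs/internal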
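The narrow:changespec part consumed by _handlechangespec() is a flat byte stream: 4-byte signals, where each 'KILL' is followed by a 20-byte changelog node, terminated by 'DONE'. A self-contained round-trip of that framing (the helper names here are illustrative, not Mercurial APIs):

import io

KILL, DONE = b'KILL', b'DONE'

def encodekills(nodes):
    # mirrors genkills(): KILL + node for each dead rev, then DONE
    return b''.join(KILL + n for n in nodes) + DONE

def decodekills(stream):
    # mirrors the readexactly() loop in _handlechangespec()
    kills = []
    sig = stream.read(4)
    while sig != DONE:
        if sig != KILL:
            raise ValueError('unexpected changespec chunk type: %r' % sig)
        kills.append(stream.read(20))
        sig = stream.read(4)
    return kills

nodes = [b'\x11' * 20, b'\x22' * 20]
assert decodekills(io.BytesIO(encodekills(nodes))) == nodes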