narrow: fix for getting the username when running http server...
idlsoft
r36180:4224f26c default
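
The fix is in applyacl_narrow() below: the ACL username used to be derived solely from repo.ui.username(), which, when the repository is served over HTTP ('hg serve' / hgweb), reports the user the server process runs as rather than the authenticated client. The new code first consults the request environment exposed through ui.environ, where the web server records the authenticated user as REMOTE_USER, and only then falls back to ui.username(). A minimal sketch of the resulting lookup order, assuming a standard CGI/WSGI-style environment (the helper name _aclusername is illustrative, not part of the patch):

    def _aclusername(ui):
        # Prefer the authenticated HTTP user if the web server provided one
        # (REMOTE_USER is the standard CGI/WSGI variable); otherwise fall
        # back to the locally configured username.
        raw = ui.environ.get('REMOTE_USER') or ui.username()
        # shortuser() reduces e.g. 'Full Name <user@example.com>' to 'user',
        # which is the key used in the narrowhgacl config section.
        return ui.shortuser(raw)
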
@@ -1,494 +1,495 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import collections
 import errno
 import struct

 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     nullid,
     nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
     dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
     util,
     wireproto,
 )

 from . import (
     narrowrepo,
 )

 NARROWCAP = 'narrow'
 _NARROWACL_SECTION = 'narrowhgacl'
 _CHANGESPECPART = NARROWCAP + ':changespec'
 _SPECPART = NARROWCAP + ':spec'
 _SPECPART_INCLUDE = 'include'
 _SPECPART_EXCLUDE = 'exclude'
 _KILLNODESIGNAL = 'KILL'
 _DONESIGNAL = 'DONE'
 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

 # When advertising capabilities, always include narrow clone support.
 def getrepocaps_narrow(orig, repo, **kwargs):
     caps = orig(repo, **kwargs)
     caps[NARROWCAP] = ['v0']
     return caps

 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.

     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
         May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
         most depth changesets away from one of heads.

     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

         visitnodes: The list of nodes (either full or ellipsis) which
           need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
           the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
           narrowchangegroup to produce ellipsis nodes with the
           correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog

     cldag = dagutil.revlogdag(cl)
     # dagutil does not like nullid/nullrev
     commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
     headsrevs = cldag.internalizeall(heads)
     if depth:
         revdepth = {h: 0 for h in headsrevs}

     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)

     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child # Recurse in case we just added a 3rd root

     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)

     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort('Failed to split up ellipsis node! head: %d, '
                           'roots: %d %d %d' % (head, r1, r2, r3))

     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = map(cl.node, missing)
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = cldag.parents(rev)
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = any(match(f) for f in curmf.diff(p1mf).iterkeys())
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = any(match(f) for f in curmf.diff(p2mf).iterkeys())
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(match(f) for f in curmf)

         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]

     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots

 def _packellipsischangegroup(repo, common, match, relevant_nodes,
                              ellipsisroots, visitnodes, depth, source, version):
     if version in ('01', '02'):
         raise error.Abort(
             'ellipsis nodes require at least cg3 on client and server, '
             'but negotiated version %s' % version)
     # We wrap cg1packer.revchunk, using a side channel to pass
     # relevant_nodes into that area. Then if linknode isn't in the
     # set, we know we have an ellipsis node and we should defer
     # sending that node's data. We override close() to detect
     # pending ellipsis nodes and flush them.
     packer = changegroup.getbundler(version, repo)
     # Let the packer have access to the narrow matcher so it can
     # omit filelogs and dirlogs as needed
     packer._narrow_matcher = lambda : match
     # Give the packer the list of nodes which should not be
     # ellipsis nodes. We store this rather than the set of nodes
     # that should be an ellipsis because for very large histories
     # we expect this to be significantly smaller.
     packer.full_nodes = relevant_nodes
     # Maps ellipsis revs to their roots at the changelog level.
     packer.precomputed_ellipsis = ellipsisroots
     # Maps CL revs to per-revlog revisions. Cleared in close() at
     # the end of each group.
     packer.clrev_to_localrev = {}
     packer.next_clrev_to_localrev = {}
     # Maps changelog nodes to changelog revs. Filled in once
     # during changelog stage and then left unmodified.
     packer.clnode_to_rev = {}
     packer.changelog_done = False
     # If true, informs the packer that it is serving shallow content and might
     # need to pack file contents not introduced by the changes being packed.
     packer.is_shallow = depth is not None

     return packer.generate(common, visitnodes, False, source)

 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
     cgversions = b2caps.get('changegroup')
     getcgkwargs = {}
     if cgversions: # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = getcgkwargs['version'] = max(cgversions)
     else:
         raise ValueError(_("server does not advertise changegroup version,"
                            " can't negotiate support for ellipsis nodes"))

     include = sorted(filter(bool, kwargs.get('includepats', [])))
     exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
     if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
         outgoing = exchange._computeoutgoing(repo, heads, common)
         if not outgoing.missing:
             return
         def wrappedgetbundler(orig, *args, **kwargs):
             bundler = orig(*args, **kwargs)
             bundler._narrow_matcher = lambda : newmatch
             return bundler
         with extensions.wrappedfunction(changegroup, 'getbundler',
                                         wrappedgetbundler):
             cg = changegroup.makestream(repo, outgoing, version, source)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

         if include or exclude:
             narrowspecpart = bundler.newpart(_SPECPART)
             if include:
                 narrowspecpart.addparam(
                     _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
             if exclude:
                 narrowspecpart.addparam(
                     _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

         return

     depth = kwargs.get('depth', None)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_('depth must be positive, got %d') % depth)

     heads = set(heads or repo.heads())
     common = set(common or [nullid])
     oldinclude = sorted(filter(bool, kwargs.get('oldincludepats', [])))
     oldexclude = sorted(filter(bool, kwargs.get('oldexcludepats', [])))
     known = {bin(n) for n in kwargs.get('known', [])}
     if known and (oldinclude != include or oldexclude != exclude):
         # Steps:
         # 1. Send kill for "$known & ::common"
         #
         # 2. Send changegroup for ::common
         #
         # 3. Proceed.
         #
         # In the future, we can send kills for only the specific
         # nodes we know should go away or change shape, and then
         # send a data stream that tells the client something like this:
         #
         # a) apply this changegroup
         # b) apply nodes XXX, YYY, ZZZ that you already have
         # c) goto a
         #
         # until they've built up the full new state.
         # Convert to revnums and intersect with "common". The client should
         # have made it a subset of "common" already, but let's be safe.
         known = set(repo.revs("%ln & ::%ln", known, common))
         # TODO: we could send only roots() of this set, and the
         # list of nodes in common, and the client could work out
         # what to strip, instead of us explicitly sending every
         # single node.
         deadrevs = known
         def genkills():
             for r in deadrevs:
                 yield _KILLNODESIGNAL
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
         newvisit, newfull, newellipsis = _computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
             cg = _packellipsischangegroup(
                 repo, common, newmatch, newfull, newellipsis,
                 newvisit, depth, source, version)
             part = bundler.newpart('changegroup', data=cg)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')

     visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)

     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         cg = _packellipsischangegroup(
             repo, common, newmatch, relevant_nodes, ellipsisroots,
             visitnodes, depth, source, version)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

 def applyacl_narrow(repo, kwargs):
-    username = repo.ui.shortuser(repo.ui.username())
-    user_includes = repo.ui.configlist(
+    ui = repo.ui
+    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+    user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
-        repo.ui.configlist(_NARROWACL_SECTION, 'default.includes'))
-    user_excludes = repo.ui.configlist(
+        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+    user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
-        repo.ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))

     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]

     req_includes = set(kwargs.get('includepats', []))
     req_excludes = set(kwargs.get('excludepats', []))

     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)

     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))

     new_args = {}
     new_args.update(kwargs)
     new_args['includepats'] = req_includes
     if req_excludes:
         new_args['excludepats'] = req_excludes
     return new_args

 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
     narrowspec.save(op.repo, includepats, excludepats)
     if not narrowrepo.REQUIREMENT in op.repo.requirements:
         op.repo.requirements.add(narrowrepo.REQUIREMENT)
         op.repo._writerequirements()
     op.repo.invalidate(clearfilecache=True)

 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog

     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()

     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _('unexpected changespec node chunk type: %s') % chunksignal)
         chunksignal = changegroup.readexactly(inpart, 4)

     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         bmstore = repo._bookmarks
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
             def recordchange(self, tr): # legacy version
                 pass
         repo._bookmarks = dummybmstore()
         chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                  topic='widen')
         repo._bookmarks = bmstore
         if chgrpfile:
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()

 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui

     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs

     ui.note(_("adding branch\n"))
     f = vfs.open(chgrpfile, "rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         if not ui.verbose:
             # silence internal shuffling chatter
             ui.pushbuffer()
         if isinstance(gen, bundle2.unbundle20):
             with repo.transaction('strip') as tr:
                 bundle2.processbundle(repo, gen, lambda: tr)
         else:
             gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
         if not ui.verbose:
             ui.popbuffer()
     finally:
         f.close()

     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(_('error removing %s: %s\n') %
                         (undovfs.join(undofile), str(e)))

     # Remove partial backup only if there were no exceptions
     vfs.unlink(chgrpfile)

 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)

     wireproto.gboptsmap['narrow'] = 'boolean'
     wireproto.gboptsmap['depth'] = 'plain'
     wireproto.gboptsmap['oldincludepats'] = 'csv'
     wireproto.gboptsmap['oldexcludepats'] = 'csv'
     wireproto.gboptsmap['includepats'] = 'csv'
     wireproto.gboptsmap['excludepats'] = 'csv'
     wireproto.gboptsmap['known'] = 'csv'

     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping['changegroup']
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             getbundlechangegrouppart_narrow(
                 *args, **applyacl_narrow(repo, kwargs))
         elif kwargs.get('narrow', False):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping['changegroup'] = wrappedcghandler
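
For reference, applyacl_narrow() looks up the short username in the narrowhgacl config section, falling back to default.includes / default.excludes, and treats a value of '*' as the whole repository ('path:.'). A hypothetical server-side hgrc illustrating the keys the code reads (the user names and paths are made up):

    [narrowhgacl]
    default.includes = docs
    alice.includes = src/lib, docs
    alice.excludes = src/lib/private
    bob.includes = *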