narrowbundle2: more kwargs native string fixes...
Augie Fackler - r36377:adce75cd (default)
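For context, the "kwargs native string fixes" in this change are Python 3 porting work: Mercurial's Python 3 source transformer turns unprefixed string literals into bytes, but **kwargs dictionaries are always keyed by native str, so a transformed lookup such as kwargs.get('depth') would effectively search for b'depth' and miss the key. Prefixing the literal with r'' leaves it a native string on both Python 2 and 3. A minimal sketch of the idea (illustrative only; getpart is a made-up function, not part of this patch):

    # Under the py3 source transformer, 'depth' would become b'depth' and the
    # lookup below would return None, because **kwargs keys are native str.
    # The r'' prefix keeps the literal a native str on Python 2 and 3 alike.
    def getpart(**kwargs):
        return kwargs.get(r'depth', None)

    assert getpart(depth=3) == 3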
@@ -1,494 +1,494 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import collections
 import errno
 import struct
 
 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     nullid,
     nullrev,
 )
 from mercurial import (
     bundle2,
     changegroup,
     dagutil,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
     util,
     wireproto,
 )
 
 from . import (
     narrowrepo,
 )
 
 NARROWCAP = 'narrow'
 _NARROWACL_SECTION = 'narrowhgacl'
 _CHANGESPECPART = NARROWCAP + ':changespec'
 _SPECPART = NARROWCAP + ':spec'
 _SPECPART_INCLUDE = 'include'
 _SPECPART_EXCLUDE = 'exclude'
 _KILLNODESIGNAL = 'KILL'
 _DONESIGNAL = 'DONE'
 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
 
 # When advertising capabilities, always include narrow clone support.
 def getrepocaps_narrow(orig, repo, **kwargs):
     caps = orig(repo, **kwargs)
     caps[NARROWCAP] = ['v0']
     return caps
 
 def _computeellipsis(repo, common, heads, known, match, depth=None):
     """Compute the shape of a narrowed DAG.
 
     Args:
       repo: The repository we're transferring.
       common: The roots of the DAG range we're transferring.
         May be just [nullid], which means all ancestors of heads.
       heads: The heads of the DAG range we're transferring.
       match: The narrowmatcher that allows us to identify relevant changes.
       depth: If not None, only consider nodes to be full nodes if they are at
         most depth changesets away from one of heads.
 
     Returns:
       A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
 
         visitnodes: The list of nodes (either full or ellipsis) which
           need to be sent to the client.
         relevant_nodes: The set of changelog nodes which change a file inside
           the narrowspec. The client needs these as non-ellipsis nodes.
         ellipsisroots: A dict of {rev: parents} that is used in
           narrowchangegroup to produce ellipsis nodes with the
           correct parents.
     """
     cl = repo.changelog
     mfl = repo.manifestlog
 
     cldag = dagutil.revlogdag(cl)
     # dagutil does not like nullid/nullrev
     commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
     headsrevs = cldag.internalizeall(heads)
     if depth:
         revdepth = {h: 0 for h in headsrevs}
 
     ellipsisheads = collections.defaultdict(set)
     ellipsisroots = collections.defaultdict(set)
 
     def addroot(head, curchange):
         """Add a root to an ellipsis head, splitting heads with 3 roots."""
         ellipsisroots[head].add(curchange)
         # Recursively split ellipsis heads with 3 roots by finding the
         # roots' youngest common descendant which is an elided merge commit.
         # That descendant takes 2 of the 3 roots as its own, and becomes a
         # root of the head.
         while len(ellipsisroots[head]) > 2:
             child, roots = splithead(head)
             splitroots(head, child, roots)
             head = child # Recurse in case we just added a 3rd root
 
     def splitroots(head, child, roots):
         ellipsisroots[head].difference_update(roots)
         ellipsisroots[head].add(child)
         ellipsisroots[child].update(roots)
         ellipsisroots[child].discard(child)
 
     def splithead(head):
         r1, r2, r3 = sorted(ellipsisroots[head])
         for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
             mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                             nr1, head, nr2, head)
             for j in mid:
                 if j == nr2:
                     return nr2, (nr1, nr2)
                 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                     return j, (nr1, nr2)
         raise error.Abort('Failed to split up ellipsis node! head: %d, '
                           'roots: %d %d %d' % (head, r1, r2, r3))
 
     missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
     visit = reversed(missing)
     relevant_nodes = set()
     visitnodes = [cl.node(m) for m in missing]
     required = set(headsrevs) | known
     for rev in visit:
         clrev = cl.changelogrevision(rev)
         ps = cldag.parents(rev)
         if depth is not None:
             curdepth = revdepth[rev]
             for p in ps:
                 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
         needed = False
         shallow_enough = depth is None or revdepth[rev] <= depth
         if shallow_enough:
             curmf = mfl[clrev.manifest].read()
             if ps:
                 # We choose to not trust the changed files list in
                 # changesets because it's not always correct. TODO: could
                 # we trust it for the non-merge case?
                 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                 needed = bool(curmf.diff(p1mf, match))
                 if not needed and len(ps) > 1:
                     # For merge changes, the list of changed files is not
                     # helpful, since we need to emit the merge if a file
                     # in the narrow spec has changed on either side of the
                     # merge. As a result, we do a manifest diff to check.
                     p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                     needed = bool(curmf.diff(p2mf, match))
             else:
                 # For a root node, we need to include the node if any
                 # files in the node match the narrowspec.
                 needed = any(curmf.walk(match))
 
         if needed:
             for head in ellipsisheads[rev]:
                 addroot(head, rev)
             for p in ps:
                 required.add(p)
             relevant_nodes.add(cl.node(rev))
         else:
             if not ps:
                 ps = [nullrev]
             if rev in required:
                 for head in ellipsisheads[rev]:
                     addroot(head, rev)
                 for p in ps:
                     ellipsisheads[p].add(rev)
             else:
                 for p in ps:
                     ellipsisheads[p] |= ellipsisheads[rev]
 
     # add common changesets as roots of their reachable ellipsis heads
     for c in commonrevs:
         for head in ellipsisheads[c]:
             addroot(head, c)
     return visitnodes, relevant_nodes, ellipsisroots
 
 def _packellipsischangegroup(repo, common, match, relevant_nodes,
                              ellipsisroots, visitnodes, depth, source, version):
     if version in ('01', '02'):
         raise error.Abort(
             'ellipsis nodes require at least cg3 on client and server, '
             'but negotiated version %s' % version)
     # We wrap cg1packer.revchunk, using a side channel to pass
     # relevant_nodes into that area. Then if linknode isn't in the
     # set, we know we have an ellipsis node and we should defer
     # sending that node's data. We override close() to detect
     # pending ellipsis nodes and flush them.
     packer = changegroup.getbundler(version, repo)
     # Let the packer have access to the narrow matcher so it can
     # omit filelogs and dirlogs as needed
     packer._narrow_matcher = lambda : match
     # Give the packer the list of nodes which should not be
     # ellipsis nodes. We store this rather than the set of nodes
     # that should be an ellipsis because for very large histories
     # we expect this to be significantly smaller.
     packer.full_nodes = relevant_nodes
     # Maps ellipsis revs to their roots at the changelog level.
     packer.precomputed_ellipsis = ellipsisroots
     # Maps CL revs to per-revlog revisions. Cleared in close() at
     # the end of each group.
     packer.clrev_to_localrev = {}
     packer.next_clrev_to_localrev = {}
     # Maps changelog nodes to changelog revs. Filled in once
     # during changelog stage and then left unmodified.
     packer.clnode_to_rev = {}
     packer.changelog_done = False
     # If true, informs the packer that it is serving shallow content and might
     # need to pack file contents not introduced by the changes being packed.
     packer.is_shallow = depth is not None
 
     return packer.generate(common, visitnodes, False, source)
 
 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
     cgversions = b2caps.get('changegroup')
     if cgversions: # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     else:
         raise ValueError(_("server does not advertise changegroup version,"
                            " can't negotiate support for ellipsis nodes"))
 
     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
     if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
         outgoing = exchange._computeoutgoing(repo, heads, common)
         if not outgoing.missing:
             return
         def wrappedgetbundler(orig, *args, **kwargs):
             bundler = orig(*args, **kwargs)
             bundler._narrow_matcher = lambda : newmatch
             return bundler
         with extensions.wrappedfunction(changegroup, 'getbundler',
                                         wrappedgetbundler):
             cg = changegroup.makestream(repo, outgoing, version, source)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
         if include or exclude:
             narrowspecpart = bundler.newpart(_SPECPART)
             if include:
                 narrowspecpart.addparam(
                     _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
             if exclude:
                 narrowspecpart.addparam(
                     _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
 
         return
 
-    depth = kwargs.get('depth', None)
+    depth = kwargs.get(r'depth', None)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_('depth must be positive, got %d') % depth)
 
     heads = set(heads or repo.heads())
     common = set(common or [nullid])
-    oldinclude = sorted(filter(bool, kwargs.get('oldincludepats', [])))
-    oldexclude = sorted(filter(bool, kwargs.get('oldexcludepats', [])))
-    known = {bin(n) for n in kwargs.get('known', [])}
+    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
+    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
+    known = {bin(n) for n in kwargs.get(r'known', [])}
     if known and (oldinclude != include or oldexclude != exclude):
         # Steps:
         # 1. Send kill for "$known & ::common"
         #
         # 2. Send changegroup for ::common
         #
         # 3. Proceed.
         #
         # In the future, we can send kills for only the specific
         # nodes we know should go away or change shape, and then
         # send a data stream that tells the client something like this:
         #
         # a) apply this changegroup
         # b) apply nodes XXX, YYY, ZZZ that you already have
         # c) goto a
         #
         # until they've built up the full new state.
         # Convert to revnums and intersect with "common". The client should
         # have made it a subset of "common" already, but let's be safe.
         known = set(repo.revs("%ln & ::%ln", known, common))
         # TODO: we could send only roots() of this set, and the
         # list of nodes in common, and the client could work out
         # what to strip, instead of us explicitly sending every
         # single node.
         deadrevs = known
         def genkills():
             for r in deadrevs:
                 yield _KILLNODESIGNAL
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
         newvisit, newfull, newellipsis = _computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
             cg = _packellipsischangegroup(
                 repo, common, newmatch, newfull, newellipsis,
                 newvisit, depth, source, version)
             part = bundler.newpart('changegroup', data=cg)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')
 
     visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)
 
     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         cg = _packellipsischangegroup(
             repo, common, newmatch, relevant_nodes, ellipsisroots,
             visitnodes, depth, source, version)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
 def applyacl_narrow(repo, kwargs):
     ui = repo.ui
     username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
     user_includes = ui.configlist(
         _NARROWACL_SECTION, username + '.includes',
         ui.configlist(_NARROWACL_SECTION, 'default.includes'))
     user_excludes = ui.configlist(
         _NARROWACL_SECTION, username + '.excludes',
         ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
     if not user_includes:
         raise error.Abort(_("{} configuration for user {} is empty")
                           .format(_NARROWACL_SECTION, username))
 
     user_includes = [
         'path:.' if p == '*' else 'path:' + p for p in user_includes]
     user_excludes = [
         'path:.' if p == '*' else 'path:' + p for p in user_excludes]
 
-    req_includes = set(kwargs.get('includepats', []))
-    req_excludes = set(kwargs.get('excludepats', []))
+    req_includes = set(kwargs.get(r'includepats', []))
+    req_excludes = set(kwargs.get(r'excludepats', []))
 
     req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
         req_includes, req_excludes, user_includes, user_excludes)
 
     if invalid_includes:
         raise error.Abort(
             _("The following includes are not accessible for {}: {}")
             .format(username, invalid_includes))
 
     new_args = {}
     new_args.update(kwargs)
     new_args['includepats'] = req_includes
     if req_excludes:
         new_args['excludepats'] = req_excludes
     return new_args
 
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
     narrowspec.save(op.repo, includepats, excludepats)
     if not narrowrepo.REQUIREMENT in op.repo.requirements:
         op.repo.requirements.add(narrowrepo.REQUIREMENT)
         op.repo._writerequirements()
     op.repo.invalidate(clearfilecache=True)
 
 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog
 
     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()
 
     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _('unexpected changespec node chunk type: %s') % chunksignal)
         chunksignal = changegroup.readexactly(inpart, 4)
 
     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         bmstore = repo._bookmarks
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
             def recordchange(self, tr): # legacy version
                 pass
         repo._bookmarks = dummybmstore()
         chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                  topic='widen')
         repo._bookmarks = bmstore
         if chgrpfile:
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()
 
 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui
 
     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs
 
     ui.note(_("adding branch\n"))
     f = vfs.open(chgrpfile, "rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         if not ui.verbose:
             # silence internal shuffling chatter
             ui.pushbuffer()
         if isinstance(gen, bundle2.unbundle20):
             with repo.transaction('strip') as tr:
                 bundle2.processbundle(repo, gen, lambda: tr)
         else:
             gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
         if not ui.verbose:
             ui.popbuffer()
     finally:
         f.close()
 
     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(_('error removing %s: %s\n') %
                         (undovfs.join(undofile), str(e)))
 
     # Remove partial backup only if there were no exceptions
     vfs.unlink(chgrpfile)
 
 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
 
     wireproto.gboptsmap['narrow'] = 'boolean'
     wireproto.gboptsmap['depth'] = 'plain'
     wireproto.gboptsmap['oldincludepats'] = 'csv'
     wireproto.gboptsmap['oldexcludepats'] = 'csv'
     wireproto.gboptsmap['includepats'] = 'csv'
     wireproto.gboptsmap['excludepats'] = 'csv'
     wireproto.gboptsmap['known'] = 'csv'
 
     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping['changegroup']
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             getbundlechangegrouppart_narrow(
                 *args, **applyacl_narrow(repo, kwargs))
         elif kwargs.get(r'narrow', False):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
 
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping['changegroup'] = wrappedcghandler