##// END OF EJS Templates
exchange: move narrow acl functionality into core...
Gregory Szorc -
r38826:3e738733 default
parent child Browse files
Show More
@@ -1,497 +1,461 b''
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import errno
11 import errno
12 import struct
12 import struct
13
13
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15 from mercurial.node import (
15 from mercurial.node import (
16 bin,
16 bin,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from mercurial import (
20 from mercurial import (
21 bundle2,
21 bundle2,
22 changegroup,
22 changegroup,
23 dagutil,
23 dagutil,
24 error,
24 error,
25 exchange,
25 exchange,
26 extensions,
26 extensions,
27 narrowspec,
27 narrowspec,
28 repair,
28 repair,
29 util,
29 util,
30 wireprototypes,
30 wireprototypes,
31 )
31 )
32 from mercurial.utils import (
32 from mercurial.utils import (
33 stringutil,
33 stringutil,
34 )
34 )
35
35
36 NARROWCAP = 'narrow'
36 NARROWCAP = 'narrow'
37 _NARROWACL_SECTION = 'narrowhgacl'
37 _NARROWACL_SECTION = 'narrowhgacl'
38 _CHANGESPECPART = NARROWCAP + ':changespec'
38 _CHANGESPECPART = NARROWCAP + ':changespec'
39 _SPECPART = NARROWCAP + ':spec'
39 _SPECPART = NARROWCAP + ':spec'
40 _SPECPART_INCLUDE = 'include'
40 _SPECPART_INCLUDE = 'include'
41 _SPECPART_EXCLUDE = 'exclude'
41 _SPECPART_EXCLUDE = 'exclude'
42 _KILLNODESIGNAL = 'KILL'
42 _KILLNODESIGNAL = 'KILL'
43 _DONESIGNAL = 'DONE'
43 _DONESIGNAL = 'DONE'
44 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
44 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
45 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
45 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
46 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
46 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
47 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
47 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
48
48
49 # When advertising capabilities, always include narrow clone support.
49 # When advertising capabilities, always include narrow clone support.
50 def getrepocaps_narrow(orig, repo, **kwargs):
50 def getrepocaps_narrow(orig, repo, **kwargs):
51 caps = orig(repo, **kwargs)
51 caps = orig(repo, **kwargs)
52 caps[NARROWCAP] = ['v0']
52 caps[NARROWCAP] = ['v0']
53 return caps
53 return caps
54
54
55 def _computeellipsis(repo, common, heads, known, match, depth=None):
55 def _computeellipsis(repo, common, heads, known, match, depth=None):
56 """Compute the shape of a narrowed DAG.
56 """Compute the shape of a narrowed DAG.
57
57
58 Args:
58 Args:
59 repo: The repository we're transferring.
59 repo: The repository we're transferring.
60 common: The roots of the DAG range we're transferring.
60 common: The roots of the DAG range we're transferring.
61 May be just [nullid], which means all ancestors of heads.
61 May be just [nullid], which means all ancestors of heads.
62 heads: The heads of the DAG range we're transferring.
62 heads: The heads of the DAG range we're transferring.
63 match: The narrowmatcher that allows us to identify relevant changes.
63 match: The narrowmatcher that allows us to identify relevant changes.
64 depth: If not None, only consider nodes to be full nodes if they are at
64 depth: If not None, only consider nodes to be full nodes if they are at
65 most depth changesets away from one of heads.
65 most depth changesets away from one of heads.
66
66
67 Returns:
67 Returns:
68 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
68 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
69
69
70 visitnodes: The list of nodes (either full or ellipsis) which
70 visitnodes: The list of nodes (either full or ellipsis) which
71 need to be sent to the client.
71 need to be sent to the client.
72 relevant_nodes: The set of changelog nodes which change a file inside
72 relevant_nodes: The set of changelog nodes which change a file inside
73 the narrowspec. The client needs these as non-ellipsis nodes.
73 the narrowspec. The client needs these as non-ellipsis nodes.
74 ellipsisroots: A dict of {rev: parents} that is used in
74 ellipsisroots: A dict of {rev: parents} that is used in
75 narrowchangegroup to produce ellipsis nodes with the
75 narrowchangegroup to produce ellipsis nodes with the
76 correct parents.
76 correct parents.
77 """
77 """
78 cl = repo.changelog
78 cl = repo.changelog
79 mfl = repo.manifestlog
79 mfl = repo.manifestlog
80
80
81 cldag = dagutil.revlogdag(cl)
81 cldag = dagutil.revlogdag(cl)
82 # dagutil does not like nullid/nullrev
82 # dagutil does not like nullid/nullrev
83 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
83 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
84 headsrevs = cldag.internalizeall(heads)
84 headsrevs = cldag.internalizeall(heads)
85 if depth:
85 if depth:
86 revdepth = {h: 0 for h in headsrevs}
86 revdepth = {h: 0 for h in headsrevs}
87
87
88 ellipsisheads = collections.defaultdict(set)
88 ellipsisheads = collections.defaultdict(set)
89 ellipsisroots = collections.defaultdict(set)
89 ellipsisroots = collections.defaultdict(set)
90
90
91 def addroot(head, curchange):
91 def addroot(head, curchange):
92 """Add a root to an ellipsis head, splitting heads with 3 roots."""
92 """Add a root to an ellipsis head, splitting heads with 3 roots."""
93 ellipsisroots[head].add(curchange)
93 ellipsisroots[head].add(curchange)
94 # Recursively split ellipsis heads with 3 roots by finding the
94 # Recursively split ellipsis heads with 3 roots by finding the
95 # roots' youngest common descendant which is an elided merge commit.
95 # roots' youngest common descendant which is an elided merge commit.
96 # That descendant takes 2 of the 3 roots as its own, and becomes a
96 # That descendant takes 2 of the 3 roots as its own, and becomes a
97 # root of the head.
97 # root of the head.
98 while len(ellipsisroots[head]) > 2:
98 while len(ellipsisroots[head]) > 2:
99 child, roots = splithead(head)
99 child, roots = splithead(head)
100 splitroots(head, child, roots)
100 splitroots(head, child, roots)
101 head = child # Recurse in case we just added a 3rd root
101 head = child # Recurse in case we just added a 3rd root
102
102
103 def splitroots(head, child, roots):
103 def splitroots(head, child, roots):
104 ellipsisroots[head].difference_update(roots)
104 ellipsisroots[head].difference_update(roots)
105 ellipsisroots[head].add(child)
105 ellipsisroots[head].add(child)
106 ellipsisroots[child].update(roots)
106 ellipsisroots[child].update(roots)
107 ellipsisroots[child].discard(child)
107 ellipsisroots[child].discard(child)
108
108
109 def splithead(head):
109 def splithead(head):
110 r1, r2, r3 = sorted(ellipsisroots[head])
110 r1, r2, r3 = sorted(ellipsisroots[head])
111 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
111 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
112 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
112 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
113 nr1, head, nr2, head)
113 nr1, head, nr2, head)
114 for j in mid:
114 for j in mid:
115 if j == nr2:
115 if j == nr2:
116 return nr2, (nr1, nr2)
116 return nr2, (nr1, nr2)
117 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
117 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
118 return j, (nr1, nr2)
118 return j, (nr1, nr2)
119 raise error.Abort('Failed to split up ellipsis node! head: %d, '
119 raise error.Abort('Failed to split up ellipsis node! head: %d, '
120 'roots: %d %d %d' % (head, r1, r2, r3))
120 'roots: %d %d %d' % (head, r1, r2, r3))
121
121
122 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
122 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
123 visit = reversed(missing)
123 visit = reversed(missing)
124 relevant_nodes = set()
124 relevant_nodes = set()
125 visitnodes = [cl.node(m) for m in missing]
125 visitnodes = [cl.node(m) for m in missing]
126 required = set(headsrevs) | known
126 required = set(headsrevs) | known
127 for rev in visit:
127 for rev in visit:
128 clrev = cl.changelogrevision(rev)
128 clrev = cl.changelogrevision(rev)
129 ps = cldag.parents(rev)
129 ps = cldag.parents(rev)
130 if depth is not None:
130 if depth is not None:
131 curdepth = revdepth[rev]
131 curdepth = revdepth[rev]
132 for p in ps:
132 for p in ps:
133 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
133 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
134 needed = False
134 needed = False
135 shallow_enough = depth is None or revdepth[rev] <= depth
135 shallow_enough = depth is None or revdepth[rev] <= depth
136 if shallow_enough:
136 if shallow_enough:
137 curmf = mfl[clrev.manifest].read()
137 curmf = mfl[clrev.manifest].read()
138 if ps:
138 if ps:
139 # We choose to not trust the changed files list in
139 # We choose to not trust the changed files list in
140 # changesets because it's not always correct. TODO: could
140 # changesets because it's not always correct. TODO: could
141 # we trust it for the non-merge case?
141 # we trust it for the non-merge case?
142 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
142 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
143 needed = bool(curmf.diff(p1mf, match))
143 needed = bool(curmf.diff(p1mf, match))
144 if not needed and len(ps) > 1:
144 if not needed and len(ps) > 1:
145 # For merge changes, the list of changed files is not
145 # For merge changes, the list of changed files is not
146 # helpful, since we need to emit the merge if a file
146 # helpful, since we need to emit the merge if a file
147 # in the narrow spec has changed on either side of the
147 # in the narrow spec has changed on either side of the
148 # merge. As a result, we do a manifest diff to check.
148 # merge. As a result, we do a manifest diff to check.
149 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
149 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
150 needed = bool(curmf.diff(p2mf, match))
150 needed = bool(curmf.diff(p2mf, match))
151 else:
151 else:
152 # For a root node, we need to include the node if any
152 # For a root node, we need to include the node if any
153 # files in the node match the narrowspec.
153 # files in the node match the narrowspec.
154 needed = any(curmf.walk(match))
154 needed = any(curmf.walk(match))
155
155
156 if needed:
156 if needed:
157 for head in ellipsisheads[rev]:
157 for head in ellipsisheads[rev]:
158 addroot(head, rev)
158 addroot(head, rev)
159 for p in ps:
159 for p in ps:
160 required.add(p)
160 required.add(p)
161 relevant_nodes.add(cl.node(rev))
161 relevant_nodes.add(cl.node(rev))
162 else:
162 else:
163 if not ps:
163 if not ps:
164 ps = [nullrev]
164 ps = [nullrev]
165 if rev in required:
165 if rev in required:
166 for head in ellipsisheads[rev]:
166 for head in ellipsisheads[rev]:
167 addroot(head, rev)
167 addroot(head, rev)
168 for p in ps:
168 for p in ps:
169 ellipsisheads[p].add(rev)
169 ellipsisheads[p].add(rev)
170 else:
170 else:
171 for p in ps:
171 for p in ps:
172 ellipsisheads[p] |= ellipsisheads[rev]
172 ellipsisheads[p] |= ellipsisheads[rev]
173
173
174 # add common changesets as roots of their reachable ellipsis heads
174 # add common changesets as roots of their reachable ellipsis heads
175 for c in commonrevs:
175 for c in commonrevs:
176 for head in ellipsisheads[c]:
176 for head in ellipsisheads[c]:
177 addroot(head, c)
177 addroot(head, c)
178 return visitnodes, relevant_nodes, ellipsisroots
178 return visitnodes, relevant_nodes, ellipsisroots
179
179
180 def _packellipsischangegroup(repo, common, match, relevant_nodes,
180 def _packellipsischangegroup(repo, common, match, relevant_nodes,
181 ellipsisroots, visitnodes, depth, source, version):
181 ellipsisroots, visitnodes, depth, source, version):
182 if version in ('01', '02'):
182 if version in ('01', '02'):
183 raise error.Abort(
183 raise error.Abort(
184 'ellipsis nodes require at least cg3 on client and server, '
184 'ellipsis nodes require at least cg3 on client and server, '
185 'but negotiated version %s' % version)
185 'but negotiated version %s' % version)
186 # We wrap cg1packer.revchunk, using a side channel to pass
186 # We wrap cg1packer.revchunk, using a side channel to pass
187 # relevant_nodes into that area. Then if linknode isn't in the
187 # relevant_nodes into that area. Then if linknode isn't in the
188 # set, we know we have an ellipsis node and we should defer
188 # set, we know we have an ellipsis node and we should defer
189 # sending that node's data. We override close() to detect
189 # sending that node's data. We override close() to detect
190 # pending ellipsis nodes and flush them.
190 # pending ellipsis nodes and flush them.
191 packer = changegroup.getbundler(version, repo)
191 packer = changegroup.getbundler(version, repo)
192 # Let the packer have access to the narrow matcher so it can
192 # Let the packer have access to the narrow matcher so it can
193 # omit filelogs and dirlogs as needed
193 # omit filelogs and dirlogs as needed
194 packer._narrow_matcher = lambda : match
194 packer._narrow_matcher = lambda : match
195 # Give the packer the list of nodes which should not be
195 # Give the packer the list of nodes which should not be
196 # ellipsis nodes. We store this rather than the set of nodes
196 # ellipsis nodes. We store this rather than the set of nodes
197 # that should be an ellipsis because for very large histories
197 # that should be an ellipsis because for very large histories
198 # we expect this to be significantly smaller.
198 # we expect this to be significantly smaller.
199 packer.full_nodes = relevant_nodes
199 packer.full_nodes = relevant_nodes
200 # Maps ellipsis revs to their roots at the changelog level.
200 # Maps ellipsis revs to their roots at the changelog level.
201 packer.precomputed_ellipsis = ellipsisroots
201 packer.precomputed_ellipsis = ellipsisroots
202 # Maps CL revs to per-revlog revisions. Cleared in close() at
202 # Maps CL revs to per-revlog revisions. Cleared in close() at
203 # the end of each group.
203 # the end of each group.
204 packer.clrev_to_localrev = {}
204 packer.clrev_to_localrev = {}
205 packer.next_clrev_to_localrev = {}
205 packer.next_clrev_to_localrev = {}
206 # Maps changelog nodes to changelog revs. Filled in once
206 # Maps changelog nodes to changelog revs. Filled in once
207 # during changelog stage and then left unmodified.
207 # during changelog stage and then left unmodified.
208 packer.clnode_to_rev = {}
208 packer.clnode_to_rev = {}
209 packer.changelog_done = False
209 packer.changelog_done = False
210 # If true, informs the packer that it is serving shallow content and might
210 # If true, informs the packer that it is serving shallow content and might
211 # need to pack file contents not introduced by the changes being packed.
211 # need to pack file contents not introduced by the changes being packed.
212 packer.is_shallow = depth is not None
212 packer.is_shallow = depth is not None
213
213
214 return packer.generate(common, visitnodes, False, source)
214 return packer.generate(common, visitnodes, False, source)
215
215
216 # Serve a changegroup for a client with a narrow clone.
216 # Serve a changegroup for a client with a narrow clone.
217 def getbundlechangegrouppart_narrow(bundler, repo, source,
217 def getbundlechangegrouppart_narrow(bundler, repo, source,
218 bundlecaps=None, b2caps=None, heads=None,
218 bundlecaps=None, b2caps=None, heads=None,
219 common=None, **kwargs):
219 common=None, **kwargs):
220 cgversions = b2caps.get('changegroup')
220 cgversions = b2caps.get('changegroup')
221 if cgversions: # 3.1 and 3.2 ship with an empty value
221 if cgversions: # 3.1 and 3.2 ship with an empty value
222 cgversions = [v for v in cgversions
222 cgversions = [v for v in cgversions
223 if v in changegroup.supportedoutgoingversions(repo)]
223 if v in changegroup.supportedoutgoingversions(repo)]
224 if not cgversions:
224 if not cgversions:
225 raise ValueError(_('no common changegroup version'))
225 raise ValueError(_('no common changegroup version'))
226 version = max(cgversions)
226 version = max(cgversions)
227 else:
227 else:
228 raise ValueError(_("server does not advertise changegroup version,"
228 raise ValueError(_("server does not advertise changegroup version,"
229 " can't negotiate support for ellipsis nodes"))
229 " can't negotiate support for ellipsis nodes"))
230
230
231 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
231 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
232 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
232 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
233 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
233 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
234 if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
234 if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
235 outgoing = exchange._computeoutgoing(repo, heads, common)
235 outgoing = exchange._computeoutgoing(repo, heads, common)
236 if not outgoing.missing:
236 if not outgoing.missing:
237 return
237 return
238 def wrappedgetbundler(orig, *args, **kwargs):
238 def wrappedgetbundler(orig, *args, **kwargs):
239 bundler = orig(*args, **kwargs)
239 bundler = orig(*args, **kwargs)
240 bundler._narrow_matcher = lambda : newmatch
240 bundler._narrow_matcher = lambda : newmatch
241 return bundler
241 return bundler
242 with extensions.wrappedfunction(changegroup, 'getbundler',
242 with extensions.wrappedfunction(changegroup, 'getbundler',
243 wrappedgetbundler):
243 wrappedgetbundler):
244 cg = changegroup.makestream(repo, outgoing, version, source)
244 cg = changegroup.makestream(repo, outgoing, version, source)
245 part = bundler.newpart('changegroup', data=cg)
245 part = bundler.newpart('changegroup', data=cg)
246 part.addparam('version', version)
246 part.addparam('version', version)
247 if 'treemanifest' in repo.requirements:
247 if 'treemanifest' in repo.requirements:
248 part.addparam('treemanifest', '1')
248 part.addparam('treemanifest', '1')
249
249
250 if include or exclude:
250 if include or exclude:
251 narrowspecpart = bundler.newpart(_SPECPART)
251 narrowspecpart = bundler.newpart(_SPECPART)
252 if include:
252 if include:
253 narrowspecpart.addparam(
253 narrowspecpart.addparam(
254 _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
254 _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
255 if exclude:
255 if exclude:
256 narrowspecpart.addparam(
256 narrowspecpart.addparam(
257 _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
257 _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
258
258
259 return
259 return
260
260
261 depth = kwargs.get(r'depth', None)
261 depth = kwargs.get(r'depth', None)
262 if depth is not None:
262 if depth is not None:
263 depth = int(depth)
263 depth = int(depth)
264 if depth < 1:
264 if depth < 1:
265 raise error.Abort(_('depth must be positive, got %d') % depth)
265 raise error.Abort(_('depth must be positive, got %d') % depth)
266
266
267 heads = set(heads or repo.heads())
267 heads = set(heads or repo.heads())
268 common = set(common or [nullid])
268 common = set(common or [nullid])
269 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
269 oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
270 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
270 oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
271 known = {bin(n) for n in kwargs.get(r'known', [])}
271 known = {bin(n) for n in kwargs.get(r'known', [])}
272 if known and (oldinclude != include or oldexclude != exclude):
272 if known and (oldinclude != include or oldexclude != exclude):
273 # Steps:
273 # Steps:
274 # 1. Send kill for "$known & ::common"
274 # 1. Send kill for "$known & ::common"
275 #
275 #
276 # 2. Send changegroup for ::common
276 # 2. Send changegroup for ::common
277 #
277 #
278 # 3. Proceed.
278 # 3. Proceed.
279 #
279 #
280 # In the future, we can send kills for only the specific
280 # In the future, we can send kills for only the specific
281 # nodes we know should go away or change shape, and then
281 # nodes we know should go away or change shape, and then
282 # send a data stream that tells the client something like this:
282 # send a data stream that tells the client something like this:
283 #
283 #
284 # a) apply this changegroup
284 # a) apply this changegroup
285 # b) apply nodes XXX, YYY, ZZZ that you already have
285 # b) apply nodes XXX, YYY, ZZZ that you already have
286 # c) goto a
286 # c) goto a
287 #
287 #
288 # until they've built up the full new state.
288 # until they've built up the full new state.
289 # Convert to revnums and intersect with "common". The client should
289 # Convert to revnums and intersect with "common". The client should
290 # have made it a subset of "common" already, but let's be safe.
290 # have made it a subset of "common" already, but let's be safe.
291 known = set(repo.revs("%ln & ::%ln", known, common))
291 known = set(repo.revs("%ln & ::%ln", known, common))
292 # TODO: we could send only roots() of this set, and the
292 # TODO: we could send only roots() of this set, and the
293 # list of nodes in common, and the client could work out
293 # list of nodes in common, and the client could work out
294 # what to strip, instead of us explicitly sending every
294 # what to strip, instead of us explicitly sending every
295 # single node.
295 # single node.
296 deadrevs = known
296 deadrevs = known
297 def genkills():
297 def genkills():
298 for r in deadrevs:
298 for r in deadrevs:
299 yield _KILLNODESIGNAL
299 yield _KILLNODESIGNAL
300 yield repo.changelog.node(r)
300 yield repo.changelog.node(r)
301 yield _DONESIGNAL
301 yield _DONESIGNAL
302 bundler.newpart(_CHANGESPECPART, data=genkills())
302 bundler.newpart(_CHANGESPECPART, data=genkills())
303 newvisit, newfull, newellipsis = _computeellipsis(
303 newvisit, newfull, newellipsis = _computeellipsis(
304 repo, set(), common, known, newmatch)
304 repo, set(), common, known, newmatch)
305 if newvisit:
305 if newvisit:
306 cg = _packellipsischangegroup(
306 cg = _packellipsischangegroup(
307 repo, common, newmatch, newfull, newellipsis,
307 repo, common, newmatch, newfull, newellipsis,
308 newvisit, depth, source, version)
308 newvisit, depth, source, version)
309 part = bundler.newpart('changegroup', data=cg)
309 part = bundler.newpart('changegroup', data=cg)
310 part.addparam('version', version)
310 part.addparam('version', version)
311 if 'treemanifest' in repo.requirements:
311 if 'treemanifest' in repo.requirements:
312 part.addparam('treemanifest', '1')
312 part.addparam('treemanifest', '1')
313
313
314 visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
314 visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
315 repo, common, heads, set(), newmatch, depth=depth)
315 repo, common, heads, set(), newmatch, depth=depth)
316
316
317 repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
317 repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
318 if visitnodes:
318 if visitnodes:
319 cg = _packellipsischangegroup(
319 cg = _packellipsischangegroup(
320 repo, common, newmatch, relevant_nodes, ellipsisroots,
320 repo, common, newmatch, relevant_nodes, ellipsisroots,
321 visitnodes, depth, source, version)
321 visitnodes, depth, source, version)
322 part = bundler.newpart('changegroup', data=cg)
322 part = bundler.newpart('changegroup', data=cg)
323 part.addparam('version', version)
323 part.addparam('version', version)
324 if 'treemanifest' in repo.requirements:
324 if 'treemanifest' in repo.requirements:
325 part.addparam('treemanifest', '1')
325 part.addparam('treemanifest', '1')
326
326
def applyacl_narrow(repo, kwargs):
    """Restrict a getbundle request's narrow patterns using per-user ACLs.

    Reads per-user (or ``default``) include/exclude pattern lists from the
    ``narrowhgacl`` config section and intersects them with the patterns
    the client requested in ``kwargs``.

    Args:
      repo: repository whose ``ui`` carries the ACL configuration.
      kwargs: getbundle keyword arguments; ``includepats`` and
        ``excludepats`` are read (the input dict is not mutated).

    Returns:
      A new dict of arguments with ``includepats`` (and, when non-empty,
      ``excludepats``) narrowed to what the user may access.

    Raises:
      error.Abort: if the user has no configured includes, or requested
        includes outside the allowed set.
    """
    ui = repo.ui
    # REMOTE_USER is set by hgweb/CGI; fall back to the local username.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        # Use %-interpolation, not str.format(): Mercurial strings are
        # bytes under Python 3 and bytes objects have no .format() method
        # (PEP 461 added only the % operator for bytes).
        raise error.Abort(_("%s configuration for user %s is empty")
                          % (_NARROWACL_SECTION, username))

    # A bare '*' means "everything"; map it to the repo-root path pattern.
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for %s: %s")
            % (username, invalid_includes))

    # Build a fresh argument dict rather than mutating the caller's kwargs.
    new_args = {}
    new_args.update(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args
362
363 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
327 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
364 def _handlechangespec_2(op, inpart):
328 def _handlechangespec_2(op, inpart):
365 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
329 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
366 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
330 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
367 if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
331 if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
368 op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
332 op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
369 op.repo._writerequirements()
333 op.repo._writerequirements()
370 op.repo.setnarrowpats(includepats, excludepats)
334 op.repo.setnarrowpats(includepats, excludepats)
371
335
372 @bundle2.parthandler(_CHANGESPECPART)
336 @bundle2.parthandler(_CHANGESPECPART)
373 def _handlechangespec(op, inpart):
337 def _handlechangespec(op, inpart):
374 repo = op.repo
338 repo = op.repo
375 cl = repo.changelog
339 cl = repo.changelog
376
340
377 # changesets which need to be stripped entirely. either they're no longer
341 # changesets which need to be stripped entirely. either they're no longer
378 # needed in the new narrow spec, or the server is sending a replacement
342 # needed in the new narrow spec, or the server is sending a replacement
379 # in the changegroup part.
343 # in the changegroup part.
380 clkills = set()
344 clkills = set()
381
345
382 # A changespec part contains all the updates to ellipsis nodes
346 # A changespec part contains all the updates to ellipsis nodes
383 # that will happen as a result of widening or narrowing a
347 # that will happen as a result of widening or narrowing a
384 # repo. All the changes that this block encounters are ellipsis
348 # repo. All the changes that this block encounters are ellipsis
385 # nodes or flags to kill an existing ellipsis.
349 # nodes or flags to kill an existing ellipsis.
386 chunksignal = changegroup.readexactly(inpart, 4)
350 chunksignal = changegroup.readexactly(inpart, 4)
387 while chunksignal != _DONESIGNAL:
351 while chunksignal != _DONESIGNAL:
388 if chunksignal == _KILLNODESIGNAL:
352 if chunksignal == _KILLNODESIGNAL:
389 # a node used to be an ellipsis but isn't anymore
353 # a node used to be an ellipsis but isn't anymore
390 ck = changegroup.readexactly(inpart, 20)
354 ck = changegroup.readexactly(inpart, 20)
391 if cl.hasnode(ck):
355 if cl.hasnode(ck):
392 clkills.add(ck)
356 clkills.add(ck)
393 else:
357 else:
394 raise error.Abort(
358 raise error.Abort(
395 _('unexpected changespec node chunk type: %s') % chunksignal)
359 _('unexpected changespec node chunk type: %s') % chunksignal)
396 chunksignal = changegroup.readexactly(inpart, 4)
360 chunksignal = changegroup.readexactly(inpart, 4)
397
361
398 if clkills:
362 if clkills:
399 # preserve bookmarks that repair.strip() would otherwise strip
363 # preserve bookmarks that repair.strip() would otherwise strip
400 bmstore = repo._bookmarks
364 bmstore = repo._bookmarks
401 class dummybmstore(dict):
365 class dummybmstore(dict):
402 def applychanges(self, repo, tr, changes):
366 def applychanges(self, repo, tr, changes):
403 pass
367 pass
404 def recordchange(self, tr): # legacy version
368 def recordchange(self, tr): # legacy version
405 pass
369 pass
406 repo._bookmarks = dummybmstore()
370 repo._bookmarks = dummybmstore()
407 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
371 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
408 topic='widen')
372 topic='widen')
409 repo._bookmarks = bmstore
373 repo._bookmarks = bmstore
410 if chgrpfile:
374 if chgrpfile:
411 op._widen_uninterr = repo.ui.uninterruptable()
375 op._widen_uninterr = repo.ui.uninterruptable()
412 op._widen_uninterr.__enter__()
376 op._widen_uninterr.__enter__()
413 # presence of _widen_bundle attribute activates widen handler later
377 # presence of _widen_bundle attribute activates widen handler later
414 op._widen_bundle = chgrpfile
378 op._widen_bundle = chgrpfile
415 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
379 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
416 # will currently always be there when using the core+narrowhg server, but
380 # will currently always be there when using the core+narrowhg server, but
417 # other servers may include a changespec part even when not widening (e.g.
381 # other servers may include a changespec part even when not widening (e.g.
418 # because we're deepening a shallow repo).
382 # because we're deepening a shallow repo).
419 if util.safehasattr(repo, 'setnewnarrowpats'):
383 if util.safehasattr(repo, 'setnewnarrowpats'):
420 repo.setnewnarrowpats()
384 repo.setnewnarrowpats()
421
385
422 def handlechangegroup_widen(op, inpart):
386 def handlechangegroup_widen(op, inpart):
423 """Changegroup exchange handler which restores temporarily-stripped nodes"""
387 """Changegroup exchange handler which restores temporarily-stripped nodes"""
424 # We saved a bundle with stripped node data we must now restore.
388 # We saved a bundle with stripped node data we must now restore.
425 # This approach is based on mercurial/repair.py@6ee26a53c111.
389 # This approach is based on mercurial/repair.py@6ee26a53c111.
426 repo = op.repo
390 repo = op.repo
427 ui = op.ui
391 ui = op.ui
428
392
429 chgrpfile = op._widen_bundle
393 chgrpfile = op._widen_bundle
430 del op._widen_bundle
394 del op._widen_bundle
431 vfs = repo.vfs
395 vfs = repo.vfs
432
396
433 ui.note(_("adding branch\n"))
397 ui.note(_("adding branch\n"))
434 f = vfs.open(chgrpfile, "rb")
398 f = vfs.open(chgrpfile, "rb")
435 try:
399 try:
436 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
400 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
437 if not ui.verbose:
401 if not ui.verbose:
438 # silence internal shuffling chatter
402 # silence internal shuffling chatter
439 ui.pushbuffer()
403 ui.pushbuffer()
440 if isinstance(gen, bundle2.unbundle20):
404 if isinstance(gen, bundle2.unbundle20):
441 with repo.transaction('strip') as tr:
405 with repo.transaction('strip') as tr:
442 bundle2.processbundle(repo, gen, lambda: tr)
406 bundle2.processbundle(repo, gen, lambda: tr)
443 else:
407 else:
444 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
408 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
445 if not ui.verbose:
409 if not ui.verbose:
446 ui.popbuffer()
410 ui.popbuffer()
447 finally:
411 finally:
448 f.close()
412 f.close()
449
413
450 # remove undo files
414 # remove undo files
451 for undovfs, undofile in repo.undofiles():
415 for undovfs, undofile in repo.undofiles():
452 try:
416 try:
453 undovfs.unlink(undofile)
417 undovfs.unlink(undofile)
454 except OSError as e:
418 except OSError as e:
455 if e.errno != errno.ENOENT:
419 if e.errno != errno.ENOENT:
456 ui.warn(_('error removing %s: %s\n') %
420 ui.warn(_('error removing %s: %s\n') %
457 (undovfs.join(undofile), stringutil.forcebytestr(e)))
421 (undovfs.join(undofile), stringutil.forcebytestr(e)))
458
422
459 # Remove partial backup only if there were no exceptions
423 # Remove partial backup only if there were no exceptions
460 op._widen_uninterr.__exit__(None, None, None)
424 op._widen_uninterr.__exit__(None, None, None)
461 vfs.unlink(chgrpfile)
425 vfs.unlink(chgrpfile)
462
426
463 def setup():
427 def setup():
464 """Enable narrow repo support in bundle2-related extension points."""
428 """Enable narrow repo support in bundle2-related extension points."""
465 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
429 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
466
430
467 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
431 getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
468
432
469 getbundleargs['narrow'] = 'boolean'
433 getbundleargs['narrow'] = 'boolean'
470 getbundleargs['depth'] = 'plain'
434 getbundleargs['depth'] = 'plain'
471 getbundleargs['oldincludepats'] = 'csv'
435 getbundleargs['oldincludepats'] = 'csv'
472 getbundleargs['oldexcludepats'] = 'csv'
436 getbundleargs['oldexcludepats'] = 'csv'
473 getbundleargs['includepats'] = 'csv'
437 getbundleargs['includepats'] = 'csv'
474 getbundleargs['excludepats'] = 'csv'
438 getbundleargs['excludepats'] = 'csv'
475 getbundleargs['known'] = 'csv'
439 getbundleargs['known'] = 'csv'
476
440
477 # Extend changegroup serving to handle requests from narrow clients.
441 # Extend changegroup serving to handle requests from narrow clients.
478 origcgfn = exchange.getbundle2partsmapping['changegroup']
442 origcgfn = exchange.getbundle2partsmapping['changegroup']
479 def wrappedcgfn(*args, **kwargs):
443 def wrappedcgfn(*args, **kwargs):
480 repo = args[1]
444 repo = args[1]
481 if repo.ui.has_section(_NARROWACL_SECTION):
445 if repo.ui.has_section(_NARROWACL_SECTION):
482 getbundlechangegrouppart_narrow(
446 getbundlechangegrouppart_narrow(
483 *args, **applyacl_narrow(repo, kwargs))
447 *args, **exchange.applynarrowacl(repo, kwargs))
484 elif kwargs.get(r'narrow', False):
448 elif kwargs.get(r'narrow', False):
485 getbundlechangegrouppart_narrow(*args, **kwargs)
449 getbundlechangegrouppart_narrow(*args, **kwargs)
486 else:
450 else:
487 origcgfn(*args, **kwargs)
451 origcgfn(*args, **kwargs)
488 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
452 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
489
453
490 # Extend changegroup receiver so client can fixup after widen requests.
454 # Extend changegroup receiver so client can fixup after widen requests.
491 origcghandler = bundle2.parthandlermapping['changegroup']
455 origcghandler = bundle2.parthandlermapping['changegroup']
492 def wrappedcghandler(op, inpart):
456 def wrappedcghandler(op, inpart):
493 origcghandler(op, inpart)
457 origcghandler(op, inpart)
494 if util.safehasattr(op, '_widen_bundle'):
458 if util.safehasattr(op, '_widen_bundle'):
495 handlechangegroup_widen(op, inpart)
459 handlechangegroup_widen(op, inpart)
496 wrappedcghandler.params = origcghandler.params
460 wrappedcghandler.params = origcghandler.params
497 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
461 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
@@ -1,2428 +1,2471 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 bin,
15 bin,
16 hex,
16 hex,
17 nullid,
17 nullid,
18 )
18 )
19 from .thirdparty import (
19 from .thirdparty import (
20 attr,
20 attr,
21 )
21 )
22 from . import (
22 from . import (
23 bookmarks as bookmod,
23 bookmarks as bookmod,
24 bundle2,
24 bundle2,
25 changegroup,
25 changegroup,
26 discovery,
26 discovery,
27 error,
27 error,
28 lock as lockmod,
28 lock as lockmod,
29 logexchange,
29 logexchange,
30 narrowspec,
30 obsolete,
31 obsolete,
31 phases,
32 phases,
32 pushkey,
33 pushkey,
33 pycompat,
34 pycompat,
34 scmutil,
35 scmutil,
35 sslutil,
36 sslutil,
36 streamclone,
37 streamclone,
37 url as urlmod,
38 url as urlmod,
38 util,
39 util,
39 )
40 )
40 from .utils import (
41 from .utils import (
41 stringutil,
42 stringutil,
42 )
43 )
43
44
44 urlerr = util.urlerr
45 urlerr = util.urlerr
45 urlreq = util.urlreq
46 urlreq = util.urlreq
46
47
47 _NARROWACL_SECTION = 'narrowhgacl'
48 _NARROWACL_SECTION = 'narrowhgacl'
48
49
49 # Maps bundle version human names to changegroup versions.
50 # Maps bundle version human names to changegroup versions.
50 _bundlespeccgversions = {'v1': '01',
51 _bundlespeccgversions = {'v1': '01',
51 'v2': '02',
52 'v2': '02',
52 'packed1': 's1',
53 'packed1': 's1',
53 'bundle2': '02', #legacy
54 'bundle2': '02', #legacy
54 }
55 }
55
56
56 # Maps bundle version with content opts to choose which part to bundle
57 # Maps bundle version with content opts to choose which part to bundle
57 _bundlespeccontentopts = {
58 _bundlespeccontentopts = {
58 'v1': {
59 'v1': {
59 'changegroup': True,
60 'changegroup': True,
60 'cg.version': '01',
61 'cg.version': '01',
61 'obsolescence': False,
62 'obsolescence': False,
62 'phases': False,
63 'phases': False,
63 'tagsfnodescache': False,
64 'tagsfnodescache': False,
64 'revbranchcache': False
65 'revbranchcache': False
65 },
66 },
66 'v2': {
67 'v2': {
67 'changegroup': True,
68 'changegroup': True,
68 'cg.version': '02',
69 'cg.version': '02',
69 'obsolescence': False,
70 'obsolescence': False,
70 'phases': False,
71 'phases': False,
71 'tagsfnodescache': True,
72 'tagsfnodescache': True,
72 'revbranchcache': True
73 'revbranchcache': True
73 },
74 },
74 'packed1' : {
75 'packed1' : {
75 'cg.version': 's1'
76 'cg.version': 's1'
76 }
77 }
77 }
78 }
78 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
79 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
79
80
80 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
81 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
81 "tagsfnodescache": False,
82 "tagsfnodescache": False,
82 "revbranchcache": False}}
83 "revbranchcache": False}}
83
84
84 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
85 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
85 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
86 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
86
87
87 @attr.s
88 @attr.s
88 class bundlespec(object):
89 class bundlespec(object):
89 compression = attr.ib()
90 compression = attr.ib()
90 wirecompression = attr.ib()
91 wirecompression = attr.ib()
91 version = attr.ib()
92 version = attr.ib()
92 wireversion = attr.ib()
93 wireversion = attr.ib()
93 params = attr.ib()
94 params = attr.ib()
94 contentopts = attr.ib()
95 contentopts = attr.ib()
95
96
96 def parsebundlespec(repo, spec, strict=True):
97 def parsebundlespec(repo, spec, strict=True):
97 """Parse a bundle string specification into parts.
98 """Parse a bundle string specification into parts.
98
99
99 Bundle specifications denote a well-defined bundle/exchange format.
100 Bundle specifications denote a well-defined bundle/exchange format.
100 The content of a given specification should not change over time in
101 The content of a given specification should not change over time in
101 order to ensure that bundles produced by a newer version of Mercurial are
102 order to ensure that bundles produced by a newer version of Mercurial are
102 readable from an older version.
103 readable from an older version.
103
104
104 The string currently has the form:
105 The string currently has the form:
105
106
106 <compression>-<type>[;<parameter0>[;<parameter1>]]
107 <compression>-<type>[;<parameter0>[;<parameter1>]]
107
108
108 Where <compression> is one of the supported compression formats
109 Where <compression> is one of the supported compression formats
109 and <type> is (currently) a version string. A ";" can follow the type and
110 and <type> is (currently) a version string. A ";" can follow the type and
110 all text afterwards is interpreted as URI encoded, ";" delimited key=value
111 all text afterwards is interpreted as URI encoded, ";" delimited key=value
111 pairs.
112 pairs.
112
113
113 If ``strict`` is True (the default) <compression> is required. Otherwise,
114 If ``strict`` is True (the default) <compression> is required. Otherwise,
114 it is optional.
115 it is optional.
115
116
116 Returns a bundlespec object of (compression, version, parameters).
117 Returns a bundlespec object of (compression, version, parameters).
117 Compression will be ``None`` if not in strict mode and a compression isn't
118 Compression will be ``None`` if not in strict mode and a compression isn't
118 defined.
119 defined.
119
120
120 An ``InvalidBundleSpecification`` is raised when the specification is
121 An ``InvalidBundleSpecification`` is raised when the specification is
121 not syntactically well formed.
122 not syntactically well formed.
122
123
123 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 An ``UnsupportedBundleSpecification`` is raised when the compression or
124 bundle type/version is not recognized.
125 bundle type/version is not recognized.
125
126
126 Note: this function will likely eventually return a more complex data
127 Note: this function will likely eventually return a more complex data
127 structure, including bundle2 part information.
128 structure, including bundle2 part information.
128 """
129 """
129 def parseparams(s):
130 def parseparams(s):
130 if ';' not in s:
131 if ';' not in s:
131 return s, {}
132 return s, {}
132
133
133 params = {}
134 params = {}
134 version, paramstr = s.split(';', 1)
135 version, paramstr = s.split(';', 1)
135
136
136 for p in paramstr.split(';'):
137 for p in paramstr.split(';'):
137 if '=' not in p:
138 if '=' not in p:
138 raise error.InvalidBundleSpecification(
139 raise error.InvalidBundleSpecification(
139 _('invalid bundle specification: '
140 _('invalid bundle specification: '
140 'missing "=" in parameter: %s') % p)
141 'missing "=" in parameter: %s') % p)
141
142
142 key, value = p.split('=', 1)
143 key, value = p.split('=', 1)
143 key = urlreq.unquote(key)
144 key = urlreq.unquote(key)
144 value = urlreq.unquote(value)
145 value = urlreq.unquote(value)
145 params[key] = value
146 params[key] = value
146
147
147 return version, params
148 return version, params
148
149
149
150
150 if strict and '-' not in spec:
151 if strict and '-' not in spec:
151 raise error.InvalidBundleSpecification(
152 raise error.InvalidBundleSpecification(
152 _('invalid bundle specification; '
153 _('invalid bundle specification; '
153 'must be prefixed with compression: %s') % spec)
154 'must be prefixed with compression: %s') % spec)
154
155
155 if '-' in spec:
156 if '-' in spec:
156 compression, version = spec.split('-', 1)
157 compression, version = spec.split('-', 1)
157
158
158 if compression not in util.compengines.supportedbundlenames:
159 if compression not in util.compengines.supportedbundlenames:
159 raise error.UnsupportedBundleSpecification(
160 raise error.UnsupportedBundleSpecification(
160 _('%s compression is not supported') % compression)
161 _('%s compression is not supported') % compression)
161
162
162 version, params = parseparams(version)
163 version, params = parseparams(version)
163
164
164 if version not in _bundlespeccgversions:
165 if version not in _bundlespeccgversions:
165 raise error.UnsupportedBundleSpecification(
166 raise error.UnsupportedBundleSpecification(
166 _('%s is not a recognized bundle version') % version)
167 _('%s is not a recognized bundle version') % version)
167 else:
168 else:
168 # Value could be just the compression or just the version, in which
169 # Value could be just the compression or just the version, in which
169 # case some defaults are assumed (but only when not in strict mode).
170 # case some defaults are assumed (but only when not in strict mode).
170 assert not strict
171 assert not strict
171
172
172 spec, params = parseparams(spec)
173 spec, params = parseparams(spec)
173
174
174 if spec in util.compengines.supportedbundlenames:
175 if spec in util.compengines.supportedbundlenames:
175 compression = spec
176 compression = spec
176 version = 'v1'
177 version = 'v1'
177 # Generaldelta repos require v2.
178 # Generaldelta repos require v2.
178 if 'generaldelta' in repo.requirements:
179 if 'generaldelta' in repo.requirements:
179 version = 'v2'
180 version = 'v2'
180 # Modern compression engines require v2.
181 # Modern compression engines require v2.
181 if compression not in _bundlespecv1compengines:
182 if compression not in _bundlespecv1compengines:
182 version = 'v2'
183 version = 'v2'
183 elif spec in _bundlespeccgversions:
184 elif spec in _bundlespeccgversions:
184 if spec == 'packed1':
185 if spec == 'packed1':
185 compression = 'none'
186 compression = 'none'
186 else:
187 else:
187 compression = 'bzip2'
188 compression = 'bzip2'
188 version = spec
189 version = spec
189 else:
190 else:
190 raise error.UnsupportedBundleSpecification(
191 raise error.UnsupportedBundleSpecification(
191 _('%s is not a recognized bundle specification') % spec)
192 _('%s is not a recognized bundle specification') % spec)
192
193
193 # Bundle version 1 only supports a known set of compression engines.
194 # Bundle version 1 only supports a known set of compression engines.
194 if version == 'v1' and compression not in _bundlespecv1compengines:
195 if version == 'v1' and compression not in _bundlespecv1compengines:
195 raise error.UnsupportedBundleSpecification(
196 raise error.UnsupportedBundleSpecification(
196 _('compression engine %s is not supported on v1 bundles') %
197 _('compression engine %s is not supported on v1 bundles') %
197 compression)
198 compression)
198
199
199 # The specification for packed1 can optionally declare the data formats
200 # The specification for packed1 can optionally declare the data formats
200 # required to apply it. If we see this metadata, compare against what the
201 # required to apply it. If we see this metadata, compare against what the
201 # repo supports and error if the bundle isn't compatible.
202 # repo supports and error if the bundle isn't compatible.
202 if version == 'packed1' and 'requirements' in params:
203 if version == 'packed1' and 'requirements' in params:
203 requirements = set(params['requirements'].split(','))
204 requirements = set(params['requirements'].split(','))
204 missingreqs = requirements - repo.supportedformats
205 missingreqs = requirements - repo.supportedformats
205 if missingreqs:
206 if missingreqs:
206 raise error.UnsupportedBundleSpecification(
207 raise error.UnsupportedBundleSpecification(
207 _('missing support for repository features: %s') %
208 _('missing support for repository features: %s') %
208 ', '.join(sorted(missingreqs)))
209 ', '.join(sorted(missingreqs)))
209
210
210 # Compute contentopts based on the version
211 # Compute contentopts based on the version
211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212 contentopts = _bundlespeccontentopts.get(version, {}).copy()
212
213
213 # Process the variants
214 # Process the variants
214 if "stream" in params and params["stream"] == "v2":
215 if "stream" in params and params["stream"] == "v2":
215 variant = _bundlespecvariants["streamv2"]
216 variant = _bundlespecvariants["streamv2"]
216 contentopts.update(variant)
217 contentopts.update(variant)
217
218
218 engine = util.compengines.forbundlename(compression)
219 engine = util.compengines.forbundlename(compression)
219 compression, wirecompression = engine.bundletype()
220 compression, wirecompression = engine.bundletype()
220 wireversion = _bundlespeccgversions[version]
221 wireversion = _bundlespeccgversions[version]
221
222
222 return bundlespec(compression, wirecompression, version, wireversion,
223 return bundlespec(compression, wirecompression, version, wireversion,
223 params, contentopts)
224 params, contentopts)
224
225
225 def readbundle(ui, fh, fname, vfs=None):
226 def readbundle(ui, fh, fname, vfs=None):
226 header = changegroup.readexactly(fh, 4)
227 header = changegroup.readexactly(fh, 4)
227
228
228 alg = None
229 alg = None
229 if not fname:
230 if not fname:
230 fname = "stream"
231 fname = "stream"
231 if not header.startswith('HG') and header.startswith('\0'):
232 if not header.startswith('HG') and header.startswith('\0'):
232 fh = changegroup.headerlessfixup(fh, header)
233 fh = changegroup.headerlessfixup(fh, header)
233 header = "HG10"
234 header = "HG10"
234 alg = 'UN'
235 alg = 'UN'
235 elif vfs:
236 elif vfs:
236 fname = vfs.join(fname)
237 fname = vfs.join(fname)
237
238
238 magic, version = header[0:2], header[2:4]
239 magic, version = header[0:2], header[2:4]
239
240
240 if magic != 'HG':
241 if magic != 'HG':
241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
242 if version == '10':
243 if version == '10':
243 if alg is None:
244 if alg is None:
244 alg = changegroup.readexactly(fh, 2)
245 alg = changegroup.readexactly(fh, 2)
245 return changegroup.cg1unpacker(fh, alg)
246 return changegroup.cg1unpacker(fh, alg)
246 elif version.startswith('2'):
247 elif version.startswith('2'):
247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
248 elif version == 'S1':
249 elif version == 'S1':
249 return streamclone.streamcloneapplier(fh)
250 return streamclone.streamcloneapplier(fh)
250 else:
251 else:
251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
252
253
253 def getbundlespec(ui, fh):
254 def getbundlespec(ui, fh):
254 """Infer the bundlespec from a bundle file handle.
255 """Infer the bundlespec from a bundle file handle.
255
256
256 The input file handle is seeked and the original seek position is not
257 The input file handle is seeked and the original seek position is not
257 restored.
258 restored.
258 """
259 """
259 def speccompression(alg):
260 def speccompression(alg):
260 try:
261 try:
261 return util.compengines.forbundletype(alg).bundletype()[0]
262 return util.compengines.forbundletype(alg).bundletype()[0]
262 except KeyError:
263 except KeyError:
263 return None
264 return None
264
265
265 b = readbundle(ui, fh, None)
266 b = readbundle(ui, fh, None)
266 if isinstance(b, changegroup.cg1unpacker):
267 if isinstance(b, changegroup.cg1unpacker):
267 alg = b._type
268 alg = b._type
268 if alg == '_truncatedBZ':
269 if alg == '_truncatedBZ':
269 alg = 'BZ'
270 alg = 'BZ'
270 comp = speccompression(alg)
271 comp = speccompression(alg)
271 if not comp:
272 if not comp:
272 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 raise error.Abort(_('unknown compression algorithm: %s') % alg)
273 return '%s-v1' % comp
274 return '%s-v1' % comp
274 elif isinstance(b, bundle2.unbundle20):
275 elif isinstance(b, bundle2.unbundle20):
275 if 'Compression' in b.params:
276 if 'Compression' in b.params:
276 comp = speccompression(b.params['Compression'])
277 comp = speccompression(b.params['Compression'])
277 if not comp:
278 if not comp:
278 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 raise error.Abort(_('unknown compression algorithm: %s') % comp)
279 else:
280 else:
280 comp = 'none'
281 comp = 'none'
281
282
282 version = None
283 version = None
283 for part in b.iterparts():
284 for part in b.iterparts():
284 if part.type == 'changegroup':
285 if part.type == 'changegroup':
285 version = part.params['version']
286 version = part.params['version']
286 if version in ('01', '02'):
287 if version in ('01', '02'):
287 version = 'v2'
288 version = 'v2'
288 else:
289 else:
289 raise error.Abort(_('changegroup version %s does not have '
290 raise error.Abort(_('changegroup version %s does not have '
290 'a known bundlespec') % version,
291 'a known bundlespec') % version,
291 hint=_('try upgrading your Mercurial '
292 hint=_('try upgrading your Mercurial '
292 'client'))
293 'client'))
293 elif part.type == 'stream2' and version is None:
294 elif part.type == 'stream2' and version is None:
294 # A stream2 part requires to be part of a v2 bundle
295 # A stream2 part requires to be part of a v2 bundle
295 version = "v2"
296 version = "v2"
296 requirements = urlreq.unquote(part.params['requirements'])
297 requirements = urlreq.unquote(part.params['requirements'])
297 splitted = requirements.split()
298 splitted = requirements.split()
298 params = bundle2._formatrequirementsparams(splitted)
299 params = bundle2._formatrequirementsparams(splitted)
299 return 'none-v2;stream=v2;%s' % params
300 return 'none-v2;stream=v2;%s' % params
300
301
301 if not version:
302 if not version:
302 raise error.Abort(_('could not identify changegroup version in '
303 raise error.Abort(_('could not identify changegroup version in '
303 'bundle'))
304 'bundle'))
304
305
305 return '%s-%s' % (comp, version)
306 return '%s-%s' % (comp, version)
306 elif isinstance(b, streamclone.streamcloneapplier):
307 elif isinstance(b, streamclone.streamcloneapplier):
307 requirements = streamclone.readbundle1header(fh)[2]
308 requirements = streamclone.readbundle1header(fh)[2]
308 formatted = bundle2._formatrequirementsparams(requirements)
309 formatted = bundle2._formatrequirementsparams(requirements)
309 return 'none-packed1;%s' % formatted
310 return 'none-packed1;%s' % formatted
310 else:
311 else:
311 raise error.Abort(_('unknown bundle type: %s') % b)
312 raise error.Abort(_('unknown bundle type: %s') % b)
312
313
313 def _computeoutgoing(repo, heads, common):
314 def _computeoutgoing(repo, heads, common):
314 """Computes which revs are outgoing given a set of common
315 """Computes which revs are outgoing given a set of common
315 and a set of heads.
316 and a set of heads.
316
317
317 This is a separate function so extensions can have access to
318 This is a separate function so extensions can have access to
318 the logic.
319 the logic.
319
320
320 Returns a discovery.outgoing object.
321 Returns a discovery.outgoing object.
321 """
322 """
322 cl = repo.changelog
323 cl = repo.changelog
323 if common:
324 if common:
324 hasnode = cl.hasnode
325 hasnode = cl.hasnode
325 common = [n for n in common if hasnode(n)]
326 common = [n for n in common if hasnode(n)]
326 else:
327 else:
327 common = [nullid]
328 common = [nullid]
328 if not heads:
329 if not heads:
329 heads = cl.heads()
330 heads = cl.heads()
330 return discovery.outgoing(repo, common, heads)
331 return discovery.outgoing(repo, common, heads)
331
332
332 def _forcebundle1(op):
333 def _forcebundle1(op):
333 """return true if a pull/push must use bundle1
334 """return true if a pull/push must use bundle1
334
335
335 This function is used to allow testing of the older bundle version"""
336 This function is used to allow testing of the older bundle version"""
336 ui = op.repo.ui
337 ui = op.repo.ui
337 # The goal is this config is to allow developer to choose the bundle
338 # The goal is this config is to allow developer to choose the bundle
338 # version used during exchanged. This is especially handy during test.
339 # version used during exchanged. This is especially handy during test.
339 # Value is a list of bundle version to be picked from, highest version
340 # Value is a list of bundle version to be picked from, highest version
340 # should be used.
341 # should be used.
341 #
342 #
342 # developer config: devel.legacy.exchange
343 # developer config: devel.legacy.exchange
343 exchange = ui.configlist('devel', 'legacy.exchange')
344 exchange = ui.configlist('devel', 'legacy.exchange')
344 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
345 return forcebundle1 or not op.remote.capable('bundle2')
346 return forcebundle1 or not op.remote.capable('bundle2')
346
347
347 class pushoperation(object):
348 class pushoperation(object):
348 """A object that represent a single push operation
349 """A object that represent a single push operation
349
350
350 Its purpose is to carry push related state and very common operations.
351 Its purpose is to carry push related state and very common operations.
351
352
352 A new pushoperation should be created at the beginning of each push and
353 A new pushoperation should be created at the beginning of each push and
353 discarded afterward.
354 discarded afterward.
354 """
355 """
355
356
356 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
357 bookmarks=(), pushvars=None):
358 bookmarks=(), pushvars=None):
358 # repo we push from
359 # repo we push from
359 self.repo = repo
360 self.repo = repo
360 self.ui = repo.ui
361 self.ui = repo.ui
361 # repo we push to
362 # repo we push to
362 self.remote = remote
363 self.remote = remote
363 # force option provided
364 # force option provided
364 self.force = force
365 self.force = force
365 # revs to be pushed (None is "all")
366 # revs to be pushed (None is "all")
366 self.revs = revs
367 self.revs = revs
367 # bookmark explicitly pushed
368 # bookmark explicitly pushed
368 self.bookmarks = bookmarks
369 self.bookmarks = bookmarks
369 # allow push of new branch
370 # allow push of new branch
370 self.newbranch = newbranch
371 self.newbranch = newbranch
371 # step already performed
372 # step already performed
372 # (used to check what steps have been already performed through bundle2)
373 # (used to check what steps have been already performed through bundle2)
373 self.stepsdone = set()
374 self.stepsdone = set()
374 # Integer version of the changegroup push result
375 # Integer version of the changegroup push result
375 # - None means nothing to push
376 # - None means nothing to push
376 # - 0 means HTTP error
377 # - 0 means HTTP error
377 # - 1 means we pushed and remote head count is unchanged *or*
378 # - 1 means we pushed and remote head count is unchanged *or*
378 # we have outgoing changesets but refused to push
379 # we have outgoing changesets but refused to push
379 # - other values as described by addchangegroup()
380 # - other values as described by addchangegroup()
380 self.cgresult = None
381 self.cgresult = None
381 # Boolean value for the bookmark push
382 # Boolean value for the bookmark push
382 self.bkresult = None
383 self.bkresult = None
383 # discover.outgoing object (contains common and outgoing data)
384 # discover.outgoing object (contains common and outgoing data)
384 self.outgoing = None
385 self.outgoing = None
385 # all remote topological heads before the push
386 # all remote topological heads before the push
386 self.remoteheads = None
387 self.remoteheads = None
387 # Details of the remote branch pre and post push
388 # Details of the remote branch pre and post push
388 #
389 #
389 # mapping: {'branch': ([remoteheads],
390 # mapping: {'branch': ([remoteheads],
390 # [newheads],
391 # [newheads],
391 # [unsyncedheads],
392 # [unsyncedheads],
392 # [discardedheads])}
393 # [discardedheads])}
393 # - branch: the branch name
394 # - branch: the branch name
394 # - remoteheads: the list of remote heads known locally
395 # - remoteheads: the list of remote heads known locally
395 # None if the branch is new
396 # None if the branch is new
396 # - newheads: the new remote heads (known locally) with outgoing pushed
397 # - newheads: the new remote heads (known locally) with outgoing pushed
397 # - unsyncedheads: the list of remote heads unknown locally.
398 # - unsyncedheads: the list of remote heads unknown locally.
398 # - discardedheads: the list of remote heads made obsolete by the push
399 # - discardedheads: the list of remote heads made obsolete by the push
399 self.pushbranchmap = None
400 self.pushbranchmap = None
400 # testable as a boolean indicating if any nodes are missing locally.
401 # testable as a boolean indicating if any nodes are missing locally.
401 self.incoming = None
402 self.incoming = None
402 # summary of the remote phase situation
403 # summary of the remote phase situation
403 self.remotephases = None
404 self.remotephases = None
404 # phases changes that must be pushed along side the changesets
405 # phases changes that must be pushed along side the changesets
405 self.outdatedphases = None
406 self.outdatedphases = None
406 # phases changes that must be pushed if changeset push fails
407 # phases changes that must be pushed if changeset push fails
407 self.fallbackoutdatedphases = None
408 self.fallbackoutdatedphases = None
408 # outgoing obsmarkers
409 # outgoing obsmarkers
409 self.outobsmarkers = set()
410 self.outobsmarkers = set()
410 # outgoing bookmarks
411 # outgoing bookmarks
411 self.outbookmarks = []
412 self.outbookmarks = []
412 # transaction manager
413 # transaction manager
413 self.trmanager = None
414 self.trmanager = None
414 # map { pushkey partid -> callback handling failure}
415 # map { pushkey partid -> callback handling failure}
415 # used to handle exception from mandatory pushkey part failure
416 # used to handle exception from mandatory pushkey part failure
416 self.pkfailcb = {}
417 self.pkfailcb = {}
417 # an iterable of pushvars or None
418 # an iterable of pushvars or None
418 self.pushvars = pushvars
419 self.pushvars = pushvars
419
420
420 @util.propertycache
421 @util.propertycache
421 def futureheads(self):
422 def futureheads(self):
422 """future remote heads if the changeset push succeeds"""
423 """future remote heads if the changeset push succeeds"""
423 return self.outgoing.missingheads
424 return self.outgoing.missingheads
424
425
425 @util.propertycache
426 @util.propertycache
426 def fallbackheads(self):
427 def fallbackheads(self):
427 """future remote heads if the changeset push fails"""
428 """future remote heads if the changeset push fails"""
428 if self.revs is None:
429 if self.revs is None:
429 # not target to push, all common are relevant
430 # not target to push, all common are relevant
430 return self.outgoing.commonheads
431 return self.outgoing.commonheads
431 unfi = self.repo.unfiltered()
432 unfi = self.repo.unfiltered()
432 # I want cheads = heads(::missingheads and ::commonheads)
433 # I want cheads = heads(::missingheads and ::commonheads)
433 # (missingheads is revs with secret changeset filtered out)
434 # (missingheads is revs with secret changeset filtered out)
434 #
435 #
435 # This can be expressed as:
436 # This can be expressed as:
436 # cheads = ( (missingheads and ::commonheads)
437 # cheads = ( (missingheads and ::commonheads)
437 # + (commonheads and ::missingheads))"
438 # + (commonheads and ::missingheads))"
438 # )
439 # )
439 #
440 #
440 # while trying to push we already computed the following:
441 # while trying to push we already computed the following:
441 # common = (::commonheads)
442 # common = (::commonheads)
442 # missing = ((commonheads::missingheads) - commonheads)
443 # missing = ((commonheads::missingheads) - commonheads)
443 #
444 #
444 # We can pick:
445 # We can pick:
445 # * missingheads part of common (::commonheads)
446 # * missingheads part of common (::commonheads)
446 common = self.outgoing.common
447 common = self.outgoing.common
447 nm = self.repo.changelog.nodemap
448 nm = self.repo.changelog.nodemap
448 cheads = [node for node in self.revs if nm[node] in common]
449 cheads = [node for node in self.revs if nm[node] in common]
449 # and
450 # and
450 # * commonheads parents on missing
451 # * commonheads parents on missing
451 revset = unfi.set('%ln and parents(roots(%ln))',
452 revset = unfi.set('%ln and parents(roots(%ln))',
452 self.outgoing.commonheads,
453 self.outgoing.commonheads,
453 self.outgoing.missing)
454 self.outgoing.missing)
454 cheads.extend(c.node() for c in revset)
455 cheads.extend(c.node() for c in revset)
455 return cheads
456 return cheads
456
457
457 @property
458 @property
458 def commonheads(self):
459 def commonheads(self):
459 """set of all common heads after changeset bundle push"""
460 """set of all common heads after changeset bundle push"""
460 if self.cgresult:
461 if self.cgresult:
461 return self.futureheads
462 return self.futureheads
462 else:
463 else:
463 return self.fallbackheads
464 return self.fallbackheads
464
465
465 # mapping of message used when pushing bookmark
466 # mapping of message used when pushing bookmark
466 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 bookmsgmap = {'update': (_("updating bookmark %s\n"),
467 _('updating bookmark %s failed!\n')),
468 _('updating bookmark %s failed!\n')),
468 'export': (_("exporting bookmark %s\n"),
469 'export': (_("exporting bookmark %s\n"),
469 _('exporting bookmark %s failed!\n')),
470 _('exporting bookmark %s failed!\n')),
470 'delete': (_("deleting remote bookmark %s\n"),
471 'delete': (_("deleting remote bookmark %s\n"),
471 _('deleting remote bookmark %s failed!\n')),
472 _('deleting remote bookmark %s failed!\n')),
472 }
473 }
473
474
474
475
475 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
476 opargs=None):
477 opargs=None):
477 '''Push outgoing changesets (limited by revs) from a local
478 '''Push outgoing changesets (limited by revs) from a local
478 repository to remote. Return an integer:
479 repository to remote. Return an integer:
479 - None means nothing to push
480 - None means nothing to push
480 - 0 means HTTP error
481 - 0 means HTTP error
481 - 1 means we pushed and remote head count is unchanged *or*
482 - 1 means we pushed and remote head count is unchanged *or*
482 we have outgoing changesets but refused to push
483 we have outgoing changesets but refused to push
483 - other values as described by addchangegroup()
484 - other values as described by addchangegroup()
484 '''
485 '''
485 if opargs is None:
486 if opargs is None:
486 opargs = {}
487 opargs = {}
487 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
488 **pycompat.strkwargs(opargs))
489 **pycompat.strkwargs(opargs))
489 if pushop.remote.local():
490 if pushop.remote.local():
490 missing = (set(pushop.repo.requirements)
491 missing = (set(pushop.repo.requirements)
491 - pushop.remote.local().supported)
492 - pushop.remote.local().supported)
492 if missing:
493 if missing:
493 msg = _("required features are not"
494 msg = _("required features are not"
494 " supported in the destination:"
495 " supported in the destination:"
495 " %s") % (', '.join(sorted(missing)))
496 " %s") % (', '.join(sorted(missing)))
496 raise error.Abort(msg)
497 raise error.Abort(msg)
497
498
498 if not pushop.remote.canpush():
499 if not pushop.remote.canpush():
499 raise error.Abort(_("destination does not support push"))
500 raise error.Abort(_("destination does not support push"))
500
501
501 if not pushop.remote.capable('unbundle'):
502 if not pushop.remote.capable('unbundle'):
502 raise error.Abort(_('cannot push: destination does not support the '
503 raise error.Abort(_('cannot push: destination does not support the '
503 'unbundle wire protocol command'))
504 'unbundle wire protocol command'))
504
505
505 # get lock as we might write phase data
506 # get lock as we might write phase data
506 wlock = lock = None
507 wlock = lock = None
507 try:
508 try:
508 # bundle2 push may receive a reply bundle touching bookmarks or other
509 # bundle2 push may receive a reply bundle touching bookmarks or other
509 # things requiring the wlock. Take it now to ensure proper ordering.
510 # things requiring the wlock. Take it now to ensure proper ordering.
510 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
511 if (not _forcebundle1(pushop)) and maypushback:
512 if (not _forcebundle1(pushop)) and maypushback:
512 wlock = pushop.repo.wlock()
513 wlock = pushop.repo.wlock()
513 lock = pushop.repo.lock()
514 lock = pushop.repo.lock()
514 pushop.trmanager = transactionmanager(pushop.repo,
515 pushop.trmanager = transactionmanager(pushop.repo,
515 'push-response',
516 'push-response',
516 pushop.remote.url())
517 pushop.remote.url())
517 except error.LockUnavailable as err:
518 except error.LockUnavailable as err:
518 # source repo cannot be locked.
519 # source repo cannot be locked.
519 # We do not abort the push, but just disable the local phase
520 # We do not abort the push, but just disable the local phase
520 # synchronisation.
521 # synchronisation.
521 msg = 'cannot lock source repository: %s\n' % err
522 msg = 'cannot lock source repository: %s\n' % err
522 pushop.ui.debug(msg)
523 pushop.ui.debug(msg)
523
524
524 with wlock or util.nullcontextmanager(), \
525 with wlock or util.nullcontextmanager(), \
525 lock or util.nullcontextmanager(), \
526 lock or util.nullcontextmanager(), \
526 pushop.trmanager or util.nullcontextmanager():
527 pushop.trmanager or util.nullcontextmanager():
527 pushop.repo.checkpush(pushop)
528 pushop.repo.checkpush(pushop)
528 _pushdiscovery(pushop)
529 _pushdiscovery(pushop)
529 if not _forcebundle1(pushop):
530 if not _forcebundle1(pushop):
530 _pushbundle2(pushop)
531 _pushbundle2(pushop)
531 _pushchangeset(pushop)
532 _pushchangeset(pushop)
532 _pushsyncphase(pushop)
533 _pushsyncphase(pushop)
533 _pushobsolete(pushop)
534 _pushobsolete(pushop)
534 _pushbookmark(pushop)
535 _pushbookmark(pushop)
535
536
536 if repo.ui.configbool('experimental', 'remotenames'):
537 if repo.ui.configbool('experimental', 'remotenames'):
537 logexchange.pullremotenames(repo, remote)
538 logexchange.pullremotenames(repo, remote)
538
539
539 return pushop
540 return pushop
540
541
541 # list of steps to perform discovery before push
542 # list of steps to perform discovery before push
542 pushdiscoveryorder = []
543 pushdiscoveryorder = []
543
544
544 # Mapping between step name and function
545 # Mapping between step name and function
545 #
546 #
546 # This exists to help extensions wrap steps if necessary
547 # This exists to help extensions wrap steps if necessary
547 pushdiscoverymapping = {}
548 pushdiscoverymapping = {}
548
549
549 def pushdiscovery(stepname):
550 def pushdiscovery(stepname):
550 """decorator for function performing discovery before push
551 """decorator for function performing discovery before push
551
552
552 The function is added to the step -> function mapping and appended to the
553 The function is added to the step -> function mapping and appended to the
553 list of steps. Beware that decorated function will be added in order (this
554 list of steps. Beware that decorated function will be added in order (this
554 may matter).
555 may matter).
555
556
556 You can only use this decorator for a new step, if you want to wrap a step
557 You can only use this decorator for a new step, if you want to wrap a step
557 from an extension, change the pushdiscovery dictionary directly."""
558 from an extension, change the pushdiscovery dictionary directly."""
558 def dec(func):
559 def dec(func):
559 assert stepname not in pushdiscoverymapping
560 assert stepname not in pushdiscoverymapping
560 pushdiscoverymapping[stepname] = func
561 pushdiscoverymapping[stepname] = func
561 pushdiscoveryorder.append(stepname)
562 pushdiscoveryorder.append(stepname)
562 return func
563 return func
563 return dec
564 return dec
564
565
565 def _pushdiscovery(pushop):
566 def _pushdiscovery(pushop):
566 """Run all discovery steps"""
567 """Run all discovery steps"""
567 for stepname in pushdiscoveryorder:
568 for stepname in pushdiscoveryorder:
568 step = pushdiscoverymapping[stepname]
569 step = pushdiscoverymapping[stepname]
569 step(pushop)
570 step(pushop)
570
571
571 @pushdiscovery('changeset')
572 @pushdiscovery('changeset')
572 def _pushdiscoverychangeset(pushop):
573 def _pushdiscoverychangeset(pushop):
573 """discover the changeset that need to be pushed"""
574 """discover the changeset that need to be pushed"""
574 fci = discovery.findcommonincoming
575 fci = discovery.findcommonincoming
575 if pushop.revs:
576 if pushop.revs:
576 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
577 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
577 ancestorsof=pushop.revs)
578 ancestorsof=pushop.revs)
578 else:
579 else:
579 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
580 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
580 common, inc, remoteheads = commoninc
581 common, inc, remoteheads = commoninc
581 fco = discovery.findcommonoutgoing
582 fco = discovery.findcommonoutgoing
582 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
583 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
583 commoninc=commoninc, force=pushop.force)
584 commoninc=commoninc, force=pushop.force)
584 pushop.outgoing = outgoing
585 pushop.outgoing = outgoing
585 pushop.remoteheads = remoteheads
586 pushop.remoteheads = remoteheads
586 pushop.incoming = inc
587 pushop.incoming = inc
587
588
588 @pushdiscovery('phase')
589 @pushdiscovery('phase')
589 def _pushdiscoveryphase(pushop):
590 def _pushdiscoveryphase(pushop):
590 """discover the phase that needs to be pushed
591 """discover the phase that needs to be pushed
591
592
592 (computed for both success and failure case for changesets push)"""
593 (computed for both success and failure case for changesets push)"""
593 outgoing = pushop.outgoing
594 outgoing = pushop.outgoing
594 unfi = pushop.repo.unfiltered()
595 unfi = pushop.repo.unfiltered()
595 remotephases = listkeys(pushop.remote, 'phases')
596 remotephases = listkeys(pushop.remote, 'phases')
596
597
597 if (pushop.ui.configbool('ui', '_usedassubrepo')
598 if (pushop.ui.configbool('ui', '_usedassubrepo')
598 and remotephases # server supports phases
599 and remotephases # server supports phases
599 and not pushop.outgoing.missing # no changesets to be pushed
600 and not pushop.outgoing.missing # no changesets to be pushed
600 and remotephases.get('publishing', False)):
601 and remotephases.get('publishing', False)):
601 # When:
602 # When:
602 # - this is a subrepo push
603 # - this is a subrepo push
603 # - and remote support phase
604 # - and remote support phase
604 # - and no changeset are to be pushed
605 # - and no changeset are to be pushed
605 # - and remote is publishing
606 # - and remote is publishing
606 # We may be in issue 3781 case!
607 # We may be in issue 3781 case!
607 # We drop the possible phase synchronisation done by
608 # We drop the possible phase synchronisation done by
608 # courtesy to publish changesets possibly locally draft
609 # courtesy to publish changesets possibly locally draft
609 # on the remote.
610 # on the remote.
610 pushop.outdatedphases = []
611 pushop.outdatedphases = []
611 pushop.fallbackoutdatedphases = []
612 pushop.fallbackoutdatedphases = []
612 return
613 return
613
614
614 pushop.remotephases = phases.remotephasessummary(pushop.repo,
615 pushop.remotephases = phases.remotephasessummary(pushop.repo,
615 pushop.fallbackheads,
616 pushop.fallbackheads,
616 remotephases)
617 remotephases)
617 droots = pushop.remotephases.draftroots
618 droots = pushop.remotephases.draftroots
618
619
619 extracond = ''
620 extracond = ''
620 if not pushop.remotephases.publishing:
621 if not pushop.remotephases.publishing:
621 extracond = ' and public()'
622 extracond = ' and public()'
622 revset = 'heads((%%ln::%%ln) %s)' % extracond
623 revset = 'heads((%%ln::%%ln) %s)' % extracond
623 # Get the list of all revs draft on remote by public here.
624 # Get the list of all revs draft on remote by public here.
624 # XXX Beware that revset break if droots is not strictly
625 # XXX Beware that revset break if droots is not strictly
625 # XXX root we may want to ensure it is but it is costly
626 # XXX root we may want to ensure it is but it is costly
626 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
627 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
627 if not outgoing.missing:
628 if not outgoing.missing:
628 future = fallback
629 future = fallback
629 else:
630 else:
630 # adds changeset we are going to push as draft
631 # adds changeset we are going to push as draft
631 #
632 #
632 # should not be necessary for publishing server, but because of an
633 # should not be necessary for publishing server, but because of an
633 # issue fixed in xxxxx we have to do it anyway.
634 # issue fixed in xxxxx we have to do it anyway.
634 fdroots = list(unfi.set('roots(%ln + %ln::)',
635 fdroots = list(unfi.set('roots(%ln + %ln::)',
635 outgoing.missing, droots))
636 outgoing.missing, droots))
636 fdroots = [f.node() for f in fdroots]
637 fdroots = [f.node() for f in fdroots]
637 future = list(unfi.set(revset, fdroots, pushop.futureheads))
638 future = list(unfi.set(revset, fdroots, pushop.futureheads))
638 pushop.outdatedphases = future
639 pushop.outdatedphases = future
639 pushop.fallbackoutdatedphases = fallback
640 pushop.fallbackoutdatedphases = fallback
640
641
641 @pushdiscovery('obsmarker')
642 @pushdiscovery('obsmarker')
642 def _pushdiscoveryobsmarkers(pushop):
643 def _pushdiscoveryobsmarkers(pushop):
643 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
644 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
644 return
645 return
645
646
646 if not pushop.repo.obsstore:
647 if not pushop.repo.obsstore:
647 return
648 return
648
649
649 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
650 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
650 return
651 return
651
652
652 repo = pushop.repo
653 repo = pushop.repo
653 # very naive computation, that can be quite expensive on big repo.
654 # very naive computation, that can be quite expensive on big repo.
654 # However: evolution is currently slow on them anyway.
655 # However: evolution is currently slow on them anyway.
655 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
656 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
656 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
657 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
657
658
658 @pushdiscovery('bookmarks')
659 @pushdiscovery('bookmarks')
659 def _pushdiscoverybookmarks(pushop):
660 def _pushdiscoverybookmarks(pushop):
660 ui = pushop.ui
661 ui = pushop.ui
661 repo = pushop.repo.unfiltered()
662 repo = pushop.repo.unfiltered()
662 remote = pushop.remote
663 remote = pushop.remote
663 ui.debug("checking for updated bookmarks\n")
664 ui.debug("checking for updated bookmarks\n")
664 ancestors = ()
665 ancestors = ()
665 if pushop.revs:
666 if pushop.revs:
666 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
667 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
667 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
668 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
668
669
669 remotebookmark = listkeys(remote, 'bookmarks')
670 remotebookmark = listkeys(remote, 'bookmarks')
670
671
671 explicit = set([repo._bookmarks.expandname(bookmark)
672 explicit = set([repo._bookmarks.expandname(bookmark)
672 for bookmark in pushop.bookmarks])
673 for bookmark in pushop.bookmarks])
673
674
674 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
675 remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
675 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
676 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
676
677
677 def safehex(x):
678 def safehex(x):
678 if x is None:
679 if x is None:
679 return x
680 return x
680 return hex(x)
681 return hex(x)
681
682
682 def hexifycompbookmarks(bookmarks):
683 def hexifycompbookmarks(bookmarks):
683 return [(b, safehex(scid), safehex(dcid))
684 return [(b, safehex(scid), safehex(dcid))
684 for (b, scid, dcid) in bookmarks]
685 for (b, scid, dcid) in bookmarks]
685
686
686 comp = [hexifycompbookmarks(marks) for marks in comp]
687 comp = [hexifycompbookmarks(marks) for marks in comp]
687 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
688 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
688
689
689 def _processcompared(pushop, pushed, explicit, remotebms, comp):
690 def _processcompared(pushop, pushed, explicit, remotebms, comp):
690 """take decision on bookmark to pull from the remote bookmark
691 """take decision on bookmark to pull from the remote bookmark
691
692
692 Exist to help extensions who want to alter this behavior.
693 Exist to help extensions who want to alter this behavior.
693 """
694 """
694 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
695 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
695
696
696 repo = pushop.repo
697 repo = pushop.repo
697
698
698 for b, scid, dcid in advsrc:
699 for b, scid, dcid in advsrc:
699 if b in explicit:
700 if b in explicit:
700 explicit.remove(b)
701 explicit.remove(b)
701 if not pushed or repo[scid].rev() in pushed:
702 if not pushed or repo[scid].rev() in pushed:
702 pushop.outbookmarks.append((b, dcid, scid))
703 pushop.outbookmarks.append((b, dcid, scid))
703 # search added bookmark
704 # search added bookmark
704 for b, scid, dcid in addsrc:
705 for b, scid, dcid in addsrc:
705 if b in explicit:
706 if b in explicit:
706 explicit.remove(b)
707 explicit.remove(b)
707 pushop.outbookmarks.append((b, '', scid))
708 pushop.outbookmarks.append((b, '', scid))
708 # search for overwritten bookmark
709 # search for overwritten bookmark
709 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
710 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
710 if b in explicit:
711 if b in explicit:
711 explicit.remove(b)
712 explicit.remove(b)
712 pushop.outbookmarks.append((b, dcid, scid))
713 pushop.outbookmarks.append((b, dcid, scid))
713 # search for bookmark to delete
714 # search for bookmark to delete
714 for b, scid, dcid in adddst:
715 for b, scid, dcid in adddst:
715 if b in explicit:
716 if b in explicit:
716 explicit.remove(b)
717 explicit.remove(b)
717 # treat as "deleted locally"
718 # treat as "deleted locally"
718 pushop.outbookmarks.append((b, dcid, ''))
719 pushop.outbookmarks.append((b, dcid, ''))
719 # identical bookmarks shouldn't get reported
720 # identical bookmarks shouldn't get reported
720 for b, scid, dcid in same:
721 for b, scid, dcid in same:
721 if b in explicit:
722 if b in explicit:
722 explicit.remove(b)
723 explicit.remove(b)
723
724
724 if explicit:
725 if explicit:
725 explicit = sorted(explicit)
726 explicit = sorted(explicit)
726 # we should probably list all of them
727 # we should probably list all of them
727 pushop.ui.warn(_('bookmark %s does not exist on the local '
728 pushop.ui.warn(_('bookmark %s does not exist on the local '
728 'or remote repository!\n') % explicit[0])
729 'or remote repository!\n') % explicit[0])
729 pushop.bkresult = 2
730 pushop.bkresult = 2
730
731
731 pushop.outbookmarks.sort()
732 pushop.outbookmarks.sort()
732
733
733 def _pushcheckoutgoing(pushop):
734 def _pushcheckoutgoing(pushop):
734 outgoing = pushop.outgoing
735 outgoing = pushop.outgoing
735 unfi = pushop.repo.unfiltered()
736 unfi = pushop.repo.unfiltered()
736 if not outgoing.missing:
737 if not outgoing.missing:
737 # nothing to push
738 # nothing to push
738 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
739 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
739 return False
740 return False
740 # something to push
741 # something to push
741 if not pushop.force:
742 if not pushop.force:
742 # if repo.obsstore == False --> no obsolete
743 # if repo.obsstore == False --> no obsolete
743 # then, save the iteration
744 # then, save the iteration
744 if unfi.obsstore:
745 if unfi.obsstore:
745 # this message are here for 80 char limit reason
746 # this message are here for 80 char limit reason
746 mso = _("push includes obsolete changeset: %s!")
747 mso = _("push includes obsolete changeset: %s!")
747 mspd = _("push includes phase-divergent changeset: %s!")
748 mspd = _("push includes phase-divergent changeset: %s!")
748 mscd = _("push includes content-divergent changeset: %s!")
749 mscd = _("push includes content-divergent changeset: %s!")
749 mst = {"orphan": _("push includes orphan changeset: %s!"),
750 mst = {"orphan": _("push includes orphan changeset: %s!"),
750 "phase-divergent": mspd,
751 "phase-divergent": mspd,
751 "content-divergent": mscd}
752 "content-divergent": mscd}
752 # If we are to push if there is at least one
753 # If we are to push if there is at least one
753 # obsolete or unstable changeset in missing, at
754 # obsolete or unstable changeset in missing, at
754 # least one of the missinghead will be obsolete or
755 # least one of the missinghead will be obsolete or
755 # unstable. So checking heads only is ok
756 # unstable. So checking heads only is ok
756 for node in outgoing.missingheads:
757 for node in outgoing.missingheads:
757 ctx = unfi[node]
758 ctx = unfi[node]
758 if ctx.obsolete():
759 if ctx.obsolete():
759 raise error.Abort(mso % ctx)
760 raise error.Abort(mso % ctx)
760 elif ctx.isunstable():
761 elif ctx.isunstable():
761 # TODO print more than one instability in the abort
762 # TODO print more than one instability in the abort
762 # message
763 # message
763 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
764 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
764
765
765 discovery.checkheads(pushop)
766 discovery.checkheads(pushop)
766 return True
767 return True
767
768
768 # List of names of steps to perform for an outgoing bundle2, order matters.
769 # List of names of steps to perform for an outgoing bundle2, order matters.
769 b2partsgenorder = []
770 b2partsgenorder = []
770
771
771 # Mapping between step name and function
772 # Mapping between step name and function
772 #
773 #
773 # This exists to help extensions wrap steps if necessary
774 # This exists to help extensions wrap steps if necessary
774 b2partsgenmapping = {}
775 b2partsgenmapping = {}
775
776
776 def b2partsgenerator(stepname, idx=None):
777 def b2partsgenerator(stepname, idx=None):
777 """decorator for function generating bundle2 part
778 """decorator for function generating bundle2 part
778
779
779 The function is added to the step -> function mapping and appended to the
780 The function is added to the step -> function mapping and appended to the
780 list of steps. Beware that decorated functions will be added in order
781 list of steps. Beware that decorated functions will be added in order
781 (this may matter).
782 (this may matter).
782
783
783 You can only use this decorator for new steps, if you want to wrap a step
784 You can only use this decorator for new steps, if you want to wrap a step
784 from an extension, attack the b2partsgenmapping dictionary directly."""
785 from an extension, attack the b2partsgenmapping dictionary directly."""
785 def dec(func):
786 def dec(func):
786 assert stepname not in b2partsgenmapping
787 assert stepname not in b2partsgenmapping
787 b2partsgenmapping[stepname] = func
788 b2partsgenmapping[stepname] = func
788 if idx is None:
789 if idx is None:
789 b2partsgenorder.append(stepname)
790 b2partsgenorder.append(stepname)
790 else:
791 else:
791 b2partsgenorder.insert(idx, stepname)
792 b2partsgenorder.insert(idx, stepname)
792 return func
793 return func
793 return dec
794 return dec
794
795
795 def _pushb2ctxcheckheads(pushop, bundler):
796 def _pushb2ctxcheckheads(pushop, bundler):
796 """Generate race condition checking parts
797 """Generate race condition checking parts
797
798
798 Exists as an independent function to aid extensions
799 Exists as an independent function to aid extensions
799 """
800 """
800 # * 'force' do not check for push race,
801 # * 'force' do not check for push race,
801 # * if we don't push anything, there are nothing to check.
802 # * if we don't push anything, there are nothing to check.
802 if not pushop.force and pushop.outgoing.missingheads:
803 if not pushop.force and pushop.outgoing.missingheads:
803 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
804 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
804 emptyremote = pushop.pushbranchmap is None
805 emptyremote = pushop.pushbranchmap is None
805 if not allowunrelated or emptyremote:
806 if not allowunrelated or emptyremote:
806 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
807 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
807 else:
808 else:
808 affected = set()
809 affected = set()
809 for branch, heads in pushop.pushbranchmap.iteritems():
810 for branch, heads in pushop.pushbranchmap.iteritems():
810 remoteheads, newheads, unsyncedheads, discardedheads = heads
811 remoteheads, newheads, unsyncedheads, discardedheads = heads
811 if remoteheads is not None:
812 if remoteheads is not None:
812 remote = set(remoteheads)
813 remote = set(remoteheads)
813 affected |= set(discardedheads) & remote
814 affected |= set(discardedheads) & remote
814 affected |= remote - set(newheads)
815 affected |= remote - set(newheads)
815 if affected:
816 if affected:
816 data = iter(sorted(affected))
817 data = iter(sorted(affected))
817 bundler.newpart('check:updated-heads', data=data)
818 bundler.newpart('check:updated-heads', data=data)
818
819
819 def _pushing(pushop):
820 def _pushing(pushop):
820 """return True if we are pushing anything"""
821 """return True if we are pushing anything"""
821 return bool(pushop.outgoing.missing
822 return bool(pushop.outgoing.missing
822 or pushop.outdatedphases
823 or pushop.outdatedphases
823 or pushop.outobsmarkers
824 or pushop.outobsmarkers
824 or pushop.outbookmarks)
825 or pushop.outbookmarks)
825
826
826 @b2partsgenerator('check-bookmarks')
827 @b2partsgenerator('check-bookmarks')
827 def _pushb2checkbookmarks(pushop, bundler):
828 def _pushb2checkbookmarks(pushop, bundler):
828 """insert bookmark move checking"""
829 """insert bookmark move checking"""
829 if not _pushing(pushop) or pushop.force:
830 if not _pushing(pushop) or pushop.force:
830 return
831 return
831 b2caps = bundle2.bundle2caps(pushop.remote)
832 b2caps = bundle2.bundle2caps(pushop.remote)
832 hasbookmarkcheck = 'bookmarks' in b2caps
833 hasbookmarkcheck = 'bookmarks' in b2caps
833 if not (pushop.outbookmarks and hasbookmarkcheck):
834 if not (pushop.outbookmarks and hasbookmarkcheck):
834 return
835 return
835 data = []
836 data = []
836 for book, old, new in pushop.outbookmarks:
837 for book, old, new in pushop.outbookmarks:
837 old = bin(old)
838 old = bin(old)
838 data.append((book, old))
839 data.append((book, old))
839 checkdata = bookmod.binaryencode(data)
840 checkdata = bookmod.binaryencode(data)
840 bundler.newpart('check:bookmarks', data=checkdata)
841 bundler.newpart('check:bookmarks', data=checkdata)
841
842
842 @b2partsgenerator('check-phases')
843 @b2partsgenerator('check-phases')
843 def _pushb2checkphases(pushop, bundler):
844 def _pushb2checkphases(pushop, bundler):
844 """insert phase move checking"""
845 """insert phase move checking"""
845 if not _pushing(pushop) or pushop.force:
846 if not _pushing(pushop) or pushop.force:
846 return
847 return
847 b2caps = bundle2.bundle2caps(pushop.remote)
848 b2caps = bundle2.bundle2caps(pushop.remote)
848 hasphaseheads = 'heads' in b2caps.get('phases', ())
849 hasphaseheads = 'heads' in b2caps.get('phases', ())
849 if pushop.remotephases is not None and hasphaseheads:
850 if pushop.remotephases is not None and hasphaseheads:
850 # check that the remote phase has not changed
851 # check that the remote phase has not changed
851 checks = [[] for p in phases.allphases]
852 checks = [[] for p in phases.allphases]
852 checks[phases.public].extend(pushop.remotephases.publicheads)
853 checks[phases.public].extend(pushop.remotephases.publicheads)
853 checks[phases.draft].extend(pushop.remotephases.draftroots)
854 checks[phases.draft].extend(pushop.remotephases.draftroots)
854 if any(checks):
855 if any(checks):
855 for nodes in checks:
856 for nodes in checks:
856 nodes.sort()
857 nodes.sort()
857 checkdata = phases.binaryencode(checks)
858 checkdata = phases.binaryencode(checks)
858 bundler.newpart('check:phases', data=checkdata)
859 bundler.newpart('check:phases', data=checkdata)
859
860
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # only keep versions both sides can produce/consume
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
899
900
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Dispatches to the binary 'phase-heads' part when the server supports it
    (and legacy exchange is not forced), otherwise to pushkey-based phases.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
916
917
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        # one bucket per phase; only the public bucket (index 0) is filled
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)
925
926
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map a failing part id back to the node it was advancing
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about any phase update the server ignored or rejected"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
960
961
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """push obsolescence markers through bundle2, when a common markers
    version exists with the remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)
972
973
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Prefers the binary 'bookmarks' part unless legacy exchange is forced;
    falls back to pushkey when only that capability is advertised.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
987
988
988 def _bmaction(old, new):
989 def _bmaction(old, new):
989 """small utility for bookmark pushing"""
990 """small utility for bookmark pushing"""
990 if not old:
991 if not old:
991 return 'export'
992 return 'export'
992 elif not new:
993 elif not new:
993 return 'delete'
994 return 'delete'
994 return 'update'
995 return 'update'
995
996
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a bundle2 binary 'bookmarks' part"""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        """report each bookmark action; reaching here implies success"""
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1017
1018
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark updates through bundle2 pushkey parts (legacy path)"""
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # translate a failing pushkey part back into a bookmark error
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report per-bookmark success/failure from the server replies"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
1062
1063
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        # mandatory=False so older servers that ignore pushvars still work
        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)
1081
1082
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # let every registered parts generator contribute to the bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered for this part
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1135
1136
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())
1175
1176
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)
1235
1236
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)
1252
1253
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1271
1272
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value form invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
1302
1303
class pulloperation(object):
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 unless a config/capability forces bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1373
1374
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks
    when closing the transaction.  Instances are usable as context
    managers through the ``util.transactional`` base class.
    """

    def __init__(self, repo, source, url):
        # repo: local repository the transaction applies to
        # source: operation name ('pull', 'push', ...) recorded for hooks
        # url: remote url, stored in hook arguments (password stripped
        # from the transaction name below)
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1403
1404
def listkeys(remote, namespace):
    """Fetch the pushkey listing for ``namespace`` from ``remote``.

    Uses the peer's command executor so the call works over both the
    legacy and the new wire protocols.  Returns the mapping produced by
    the remote ``listkeys`` command.
    """
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()
1407
1408
def _fullpullbundle2(repo, pullop):
    """Repeatedly pull bundle2 data until the request is fully satisfied.

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if changegroup.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing was added: the reply was complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally
            break
        # partial reply: fold the newly received heads into the common
        # set and retry with a narrowed request
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1445
1446
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.

    Raises ``error.Abort`` when the remote requires repository features the
    local repository does not support.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    # Refuse early if the source repository needs features we lack.
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        # the steps below are no-ops when bundle2 already handled them
        # (they check pullop.stepsdone before acting)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1502
1503
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly.

    Raises ``AssertionError`` when ``stepname`` is already registered.
    """
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
1526
1527
def _pulldiscovery(pullop):
    """Run all discovery steps

    Steps are executed in registration order (``pulldiscoveryorder``),
    each receiving the in-progress ``pulloperation``.
    """
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)
1532
1533
@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # caller already supplied remote bookmark data
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1547
1548
1548
1549
@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point.

    Populates ``pullop.common``, ``pullop.fetch`` and ``pullop.rheads``.
    """
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote heads is filtered locally, put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already known: nothing left to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1581
1582
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup.

    Builds the ``getbundle`` argument dict from the pull operation state,
    requests the bundle from the remote, applies it, then processes the
    phase and bookmark records it carried.  Steps handled here are added
    to ``pullop.stepsdone`` so the legacy fallbacks become no-ops.
    """
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        # prefer the binary phase-heads part unless legacy exchange is forced
        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    # extension hook point: may add/adjust getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                     source='pull')
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1703
1704
1704 def _pullbundle2extraprepare(pullop, kwargs):
1705 def _pullbundle2extraprepare(pullop, kwargs):
1705 """hook function so that extensions can extend the getbundle call"""
1706 """hook function so that extensions can extend the getbundle call"""
1706
1707
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Legacy (non-bundle2) changegroup pull.  No-op when the bundle2 path
    already handled the 'changegroup' step.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1752
1753
def _pullphase(pullop):
    """Fetch remote phase data via listkeys and apply it locally.

    No-op when the bundle2 path already handled the 'phases' step.
    """
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)
1759
1760
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the mapping returned by the remote 'phases'
    listkeys namespace.  Advances local phase boundaries (public/draft)
    for the pulled subset; never demotes a changeset.
    """
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
1794
1795
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    # skip if an earlier step of this pull already handled bookmarks
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    bookmod.updatefromremote(
        repo.ui, repo, pullop.remotebookmarks, pullop.remote.url(),
        pullop.gettransaction, explicit=pullop.explicitbookmarks)
1806
1807
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        # 'dump0' is always present when the remote has any markers
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            allmarkers = []
            # decode every dumpN key, newest key first
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    decoded = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(decoded)
                    allmarkers.extend(newmarks)
            if allmarkers:
                pullop.repo.obsstore.add(tr, allmarkers)
                pullop.repo.invalidatevolatilesets()
    return tr
1834
1835
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Raises error.Abort when the user has no include rules configured or
    requests patterns outside their allowed set.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        # Use %-formatting: _() returns bytes on Python 3 and bytes have
        # no .format() method.
        raise error.Abort(_("%s configuration for user %s is empty")
                          % (_NARROWACL_SECTION, username))

    # '*' in the config means "everything"; normalize to matcher patterns.
    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    # Intersect what the client asked for with what the ACL allows.
    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for %s: %s")
            % (username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args
1877
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise bundle2 itself plus its URL-quoted capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {'HG20', 'bundle2=' + urlreq.quote(capsblob)}
1841
1884
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""
    def register(func):
        # refuse duplicates: wrapping an existing step must go through
        # getbundle2partsmapping directly
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        # inserting at the end is equivalent to appending
        position = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(position, stepname)
        return func
    return register
1868
1911
def bundle2requested(bundlecaps):
    """tell whether the client advertised any bundle2 (HG2x) capability"""
    if bundlecaps is None:
        return False
    return any(cap.startswith('HG2') for cap in bundlecaps)
1873
1916
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # wire protocol hands us native-str keyword names; normalize to bytes
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        # bundle10 knows no part types, so any extra argument is an error
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    # decode the client's advertised bundle2 capabilities from bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
1920
1963
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    # Delegate entirely to bundle2, which decides whether a stream clone
    # part can and should be added for this request.
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1924
1967
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            # keep only versions both sides can produce/consume
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    # cgstream stays None when the client opted out or nothing is missing
    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')
1953
1996
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    # only emit the part when the client explicitly asked for bookmarks
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    encoded = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    # an empty payload means no bookmarks exist; skip the part entirely
    if encoded:
        bundler.newpart('bookmarks', data=encoded)
1966
2009
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one part per requested pushkey namespace
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
1977
2020
@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get(r'obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the requested heads
    subset = [c.node() for c in repo.set('::%ln', heads)]
    markers = sorted(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
1989
2032
@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        # client must support the binary 'heads' phase exchange
        if not 'heads' in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # a publishing server exposes everything as public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        # (one sorted list of heads per phase, indexed by phase number)
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
2036
2079
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    wantcg = kwargs.get(r'cg', True)
    if not wantcg or 'hgtagsfnodes' not in b2caps:
        return

    bundle2.addparttagsfnodescache(repo, bundler,
                                   _computeoutgoing(repo, heads, common))
2056
2099
@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # changesets must actually be exchanged
    if not kwargs.get(r'cg', True):
        return
    # the client must understand the part
    if 'rev-branch-cache' not in b2caps:
        return
    # narrow bundles aren't currently compatible with this cache
    if kwargs.get(r'narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2083
2126
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    # accept a forced push, an exact head match, or a matching head digest
    if (their_heads == ['force'] or their_heads == heads or
        their_heads == ['hashed', heads_hash]):
        return
    # someone else committed/pushed/unbundled while we
    # were transferring data
    raise error.PushRaced('repository changed while %s - '
                          'please try again' % context)
2097
2140
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                # lazily take locks and open the transaction only when some
                # part actually needs them; state lives in lockandtr so the
                # outer finally can release whatever was acquired
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    # start buffering output so it can be replayed into the
                    # reply bundle by recordout in the outer finally
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # flag the exception so callers know it happened mid-bundle2
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    # salvage already-generated reply parts and keep
                    # appending captured output to them
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        # release in reverse acquisition order: tr, lock, wlock
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2171
2214
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Inspects ``pullop`` and silently returns when clone bundles cannot be
    used (feature disabled, repo not empty, explicit heads requested, or
    remote lacks the capability). Otherwise fetches the remote's clone
    bundles manifest, filters/sorts its entries, and attempts to apply the
    best one. Raises ``error.Abort`` if applying fails and fallback to a
    regular clone is not configured.
    """

    repo = pullop.repo
    remote = pullop.remote

    # Client-side opt-out: ui.clonebundles=false disables the feature.
    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # An explicit set of requested heads means this isn't a plain full
    # clone, so a pre-generated clone bundle doesn't apply.
    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Only the single best entry is attempted; on failure we do NOT walk
    # down the list (see abort/fallback handling below).
    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))
2238
2281
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            # Blank manifest lines carry no entry.
            continue

        # First field is the bundle URL; the rest are KEY=VALUE attributes.
        entry = {'URL': fields[0]}
        for keyvalue in fields[1:]:
            key, value = keyvalue.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            entry[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    entry['COMPRESSION'] = bundlespec.compression
                    entry['VERSION'] = bundlespec.version
                except (error.InvalidBundleSpecification,
                        error.UnsupportedBundleSpecification):
                    # A malformed/unknown spec still yields an entry; it
                    # just lacks the derived COMPRESSION/VERSION keys.
                    pass

        entries.append(entry)

    return entries
2273
2316
def isstreamclonespec(bundlespec):
    """Report whether ``bundlespec`` describes a stream clone bundle.

    Recognizes stream clone v1 (uncompressed, wire version ``s1``) and
    stream clone v2 (uncompressed, wire version ``02`` with the
    ``streamv2`` content option set). Returns a truthy value for stream
    clone specs and ``False`` otherwise.
    """
    # Stream clone v1
    if bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1':
        return True

    # Stream clone v2. (Parenthesized condition; the original used
    # redundant backslash continuations inside the parentheses.)
    if (bundlespec.wirecompression == 'UN'
            and bundlespec.wireversion == '02'
            and bundlespec.contentopts.get('streamv2')):
        return True

    return False
2286
2329
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                # strict=True: an unparseable spec raises rather than being
                # passed through, so we can reject the entry below.
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        # SNI-only servers can't be used by clients whose TLS stack lacks
        # Server Name Indication support.
        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
2334
2377
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        # Walk the preference list in order; the first attribute that
        # discriminates the two entries decides the ordering.
        for key, wanted in self.prefers:
            mine = self.value.get(key)
            theirs = other.value.get(key)

            # Only one side carries the attribute, and it matches the
            # preference exactly: that side sorts first.
            if mine is not None and theirs is None and mine == wanted:
                return -1
            if theirs is not None and mine is None and theirs == wanted:
                return 1

            # Both sides must carry the attribute to compare further.
            if mine is None or theirs is None:
                continue

            # Identical values can't discriminate; consult next preference.
            if mine == theirs:
                continue

            # Exact matches come first.
            if mine == wanted:
                return -1
            if theirs == wanted:
                return 1

            # Neither matched; fall through to the next preference.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
2398
2441
def sortclonebundleentries(ui, entries):
    """Return ``entries`` ordered by the user's clone bundle preferences.

    Preferences are read from the ``ui.clonebundleprefers`` config list as
    ``KEY=VALUE`` strings. With no preferences configured, a shallow copy of
    ``entries`` in the original order is returned.
    """
    rawprefers = ui.configlist('ui', 'clonebundleprefers')
    if not rawprefers:
        return list(entries)

    prefers = [keyvalue.split('=', 1) for keyvalue in rawprefers]

    # clonebundleentry implements the rich comparisons needed for a stable
    # preference-aware sort.
    wrapped = [clonebundleentry(entry, prefers) for entry in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
2408
2451
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, reads it as a bundle, and applies it inside a repo
    lock and transaction. Returns True on success. HTTP and URL errors are
    caught, reported as warnings, and result in False; other exceptions
    propagate (rolling back the transaction via the ``with`` block).
    """
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            # Stream clone bundles apply themselves; everything else goes
            # through the bundle2 application machinery.
            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

    return False
General Comments 0
You need to be logged in to leave comments. Login now