exchange: move disabling of rev-branch-cache bundle part out of narrow...
Gregory Szorc - r38825:9b64b73d default
hgext/narrow/narrowbundle2.py
@@ -1,510 +1,497 @@
# narrowbundle2.py - bundle2 extensions for narrow repository support
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import struct

from mercurial.i18n import _
from mercurial.node import (
    bin,
    nullid,
    nullrev,
)
from mercurial import (
    bundle2,
    changegroup,
    dagutil,
    error,
    exchange,
    extensions,
    narrowspec,
    repair,
    util,
    wireprototypes,
)
from mercurial.utils import (
    stringutil,
)

NARROWCAP = 'narrow'
_NARROWACL_SECTION = 'narrowhgacl'
_CHANGESPECPART = NARROWCAP + ':changespec'
_SPECPART = NARROWCAP + ':spec'
_SPECPART_INCLUDE = 'include'
_SPECPART_EXCLUDE = 'exclude'
_KILLNODESIGNAL = 'KILL'
_DONESIGNAL = 'DONE'
_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

# When advertising capabilities, always include narrow clone support.
def getrepocaps_narrow(orig, repo, **kwargs):
    caps = orig(repo, **kwargs)
    caps[NARROWCAP] = ['v0']
    return caps

def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    cldag = dagutil.revlogdag(cl)
    # dagutil does not like nullid/nullrev
    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
    headsrevs = cldag.internalizeall(heads)
    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort('Failed to split up ellipsis node! head: %d, '
                          'roots: %d %d %d' % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = cldag.parents(rev)
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = changegroup.getbundler(version, repo)
    # Let the packer have access to the narrow matcher so it can
    # omit filelogs and dirlogs as needed
    packer._narrow_matcher = lambda : match
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)

# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
                                    bundlecaps=None, b2caps=None, heads=None,
                                    common=None, **kwargs):
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    else:
        raise ValueError(_("server does not advertise changegroup version,"
                           " can't negotiate support for ellipsis nodes"))

    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
        outgoing = exchange._computeoutgoing(repo, heads, common)
        if not outgoing.missing:
            return
        def wrappedgetbundler(orig, *args, **kwargs):
            bundler = orig(*args, **kwargs)
            bundler._narrow_matcher = lambda : newmatch
            return bundler
        with extensions.wrappedfunction(changegroup, 'getbundler',
                                        wrappedgetbundler):
            cg = changegroup.makestream(repo, outgoing, version, source)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

        if include or exclude:
            narrowspecpart = bundler.newpart(_SPECPART)
            if include:
                narrowspecpart.addparam(
                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
            if exclude:
                narrowspecpart.addparam(
                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

        return

    depth = kwargs.get(r'depth', None)
    if depth is not None:
        depth = int(depth)
        if depth < 1:
            raise error.Abort(_('depth must be positive, got %d') % depth)

    heads = set(heads or repo.heads())
    common = set(common or [nullid])
    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
    known = {bin(n) for n in kwargs.get(r'known', [])}
    if known and (oldinclude != include or oldexclude != exclude):
        # Steps:
        # 1. Send kill for "$known & ::common"
        #
        # 2. Send changegroup for ::common
        #
        # 3. Proceed.
        #
        # In the future, we can send kills for only the specific
        # nodes we know should go away or change shape, and then
        # send a data stream that tells the client something like this:
        #
        # a) apply this changegroup
        # b) apply nodes XXX, YYY, ZZZ that you already have
        # c) goto a
        #
        # until they've built up the full new state.
        # Convert to revnums and intersect with "common". The client should
        # have made it a subset of "common" already, but let's be safe.
        known = set(repo.revs("%ln & ::%ln", known, common))
        # TODO: we could send only roots() of this set, and the
        # list of nodes in common, and the client could work out
        # what to strip, instead of us explicitly sending every
        # single node.
        deadrevs = known
        def genkills():
            for r in deadrevs:
                yield _KILLNODESIGNAL
                yield repo.changelog.node(r)
            yield _DONESIGNAL
        bundler.newpart(_CHANGESPECPART, data=genkills())
        newvisit, newfull, newellipsis = _computeellipsis(
            repo, set(), common, known, newmatch)
        if newvisit:
            cg = _packellipsischangegroup(
                repo, common, newmatch, newfull, newellipsis,
                newvisit, depth, source, version)
            part = bundler.newpart('changegroup', data=cg)
            part.addparam('version', version)
            if 'treemanifest' in repo.requirements:
                part.addparam('treemanifest', '1')

    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
        repo, common, heads, set(), newmatch, depth=depth)

    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
    if visitnodes:
        cg = _packellipsischangegroup(
            repo, common, newmatch, relevant_nodes, ellipsisroots,
            visitnodes, depth, source, version)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

def applyacl_narrow(repo, kwargs):
    ui = repo.ui
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args

@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
def _handlechangespec_2(op, inpart):
    includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
        op.repo._writerequirements()
    op.repo.setnarrowpats(includepats, excludepats)

@bundle2.parthandler(_CHANGESPECPART)
def _handlechangespec(op, inpart):
    repo = op.repo
    cl = repo.changelog

    # changesets which need to be stripped entirely. either they're no longer
    # needed in the new narrow spec, or the server is sending a replacement
    # in the changegroup part.
    clkills = set()

    # A changespec part contains all the updates to ellipsis nodes
    # that will happen as a result of widening or narrowing a
    # repo. All the changes that this block encounters are ellipsis
    # nodes or flags to kill an existing ellipsis.
    chunksignal = changegroup.readexactly(inpart, 4)
    while chunksignal != _DONESIGNAL:
        if chunksignal == _KILLNODESIGNAL:
            # a node used to be an ellipsis but isn't anymore
            ck = changegroup.readexactly(inpart, 20)
            if cl.hasnode(ck):
                clkills.add(ck)
        else:
            raise error.Abort(
                _('unexpected changespec node chunk type: %s') % chunksignal)
        chunksignal = changegroup.readexactly(inpart, 4)

    if clkills:
        # preserve bookmarks that repair.strip() would otherwise strip
        bmstore = repo._bookmarks
        class dummybmstore(dict):
            def applychanges(self, repo, tr, changes):
                pass
            def recordchange(self, tr): # legacy version
                pass
        repo._bookmarks = dummybmstore()
        chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                 topic='widen')
        repo._bookmarks = bmstore
        if chgrpfile:
            op._widen_uninterr = repo.ui.uninterruptable()
            op._widen_uninterr.__enter__()
            # presence of _widen_bundle attribute activates widen handler later
            op._widen_bundle = chgrpfile
    # Set the new narrowspec if we're widening. The setnewnarrowpats() method
    # will currently always be there when using the core+narrowhg server, but
    # other servers may include a changespec part even when not widening (e.g.
    # because we're deepening a shallow repo).
    if util.safehasattr(repo, 'setnewnarrowpats'):
        repo.setnewnarrowpats()

def handlechangegroup_widen(op, inpart):
    """Changegroup exchange handler which restores temporarily-stripped nodes"""
    # We saved a bundle with stripped node data we must now restore.
    # This approach is based on mercurial/repair.py@6ee26a53c111.
    repo = op.repo
    ui = op.ui

    chgrpfile = op._widen_bundle
    del op._widen_bundle
    vfs = repo.vfs

    ui.note(_("adding branch\n"))
    f = vfs.open(chgrpfile, "rb")
    try:
        gen = exchange.readbundle(ui, f, chgrpfile, vfs)
        if not ui.verbose:
            # silence internal shuffling chatter
            ui.pushbuffer()
        if isinstance(gen, bundle2.unbundle20):
            with repo.transaction('strip') as tr:
                bundle2.processbundle(repo, gen, lambda: tr)
        else:
            gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
        if not ui.verbose:
            ui.popbuffer()
    finally:
        f.close()

    # remove undo files
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                ui.warn(_('error removing %s: %s\n') %
                        (undovfs.join(undofile), stringutil.forcebytestr(e)))

    # Remove partial backup only if there were no exceptions
    op._widen_uninterr.__exit__(None, None, None)
    vfs.unlink(chgrpfile)

def setup():
    """Enable narrow repo support in bundle2-related extension points."""
    extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)

    getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS

    getbundleargs['narrow'] = 'boolean'
    getbundleargs['depth'] = 'plain'
    getbundleargs['oldincludepats'] = 'csv'
    getbundleargs['oldexcludepats'] = 'csv'
    getbundleargs['includepats'] = 'csv'
    getbundleargs['excludepats'] = 'csv'
    getbundleargs['known'] = 'csv'

    # Extend changegroup serving to handle requests from narrow clients.
    origcgfn = exchange.getbundle2partsmapping['changegroup']
    def wrappedcgfn(*args, **kwargs):
        repo = args[1]
        if repo.ui.has_section(_NARROWACL_SECTION):
            getbundlechangegrouppart_narrow(
                *args, **applyacl_narrow(repo, kwargs))
        elif kwargs.get(r'narrow', False):
            getbundlechangegrouppart_narrow(*args, **kwargs)
        else:
            origcgfn(*args, **kwargs)
    exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

-    # disable rev branch cache exchange when serving a narrow bundle
-    # (currently incompatible with that part)
-    origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache']
-    def wrappedcgfn(*args, **kwargs):
-        repo = args[1]
-        if repo.ui.has_section(_NARROWACL_SECTION):
-            return
-        elif kwargs.get(r'narrow', False):
-            return
-        else:
-            origrbcfn(*args, **kwargs)
-    exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn
-
    # Extend changegroup receiver so client can fixup after widen requests.
    origcghandler = bundle2.parthandlermapping['changegroup']
    def wrappedcghandler(op, inpart):
        origcghandler(op, inpart)
        if util.safehasattr(op, '_widen_bundle'):
            handlechangegroup_widen(op, inpart)
    wrappedcghandler.params = origcghandler.params
    bundle2.parthandlermapping['changegroup'] = wrappedcghandler
mercurial/exchange.py
@@ -1,2421 +1,2428 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

+_NARROWACL_SECTION = 'narrowhgacl'
+
47 # Maps bundle version human names to changegroup versions.
49 # Maps bundle version human names to changegroup versions.
48 _bundlespeccgversions = {'v1': '01',
50 _bundlespeccgversions = {'v1': '01',
49 'v2': '02',
51 'v2': '02',
50 'packed1': 's1',
52 'packed1': 's1',
51 'bundle2': '02', #legacy
53 'bundle2': '02', #legacy
52 }
54 }
53
55
54 # Maps bundle version with content opts to choose which part to bundle
56 # Maps bundle version with content opts to choose which part to bundle
55 _bundlespeccontentopts = {
57 _bundlespeccontentopts = {
56 'v1': {
58 'v1': {
57 'changegroup': True,
59 'changegroup': True,
58 'cg.version': '01',
60 'cg.version': '01',
59 'obsolescence': False,
61 'obsolescence': False,
60 'phases': False,
62 'phases': False,
61 'tagsfnodescache': False,
63 'tagsfnodescache': False,
62 'revbranchcache': False
64 'revbranchcache': False
63 },
65 },
64 'v2': {
66 'v2': {
65 'changegroup': True,
67 'changegroup': True,
66 'cg.version': '02',
68 'cg.version': '02',
67 'obsolescence': False,
69 'obsolescence': False,
68 'phases': False,
70 'phases': False,
69 'tagsfnodescache': True,
71 'tagsfnodescache': True,
70 'revbranchcache': True
72 'revbranchcache': True
71 },
73 },
72 'packed1' : {
74 'packed1' : {
73 'cg.version': 's1'
75 'cg.version': 's1'
74 }
76 }
75 }
77 }
76 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
78 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
77
79
78 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
80 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
79 "tagsfnodescache": False,
81 "tagsfnodescache": False,
80 "revbranchcache": False}}
82 "revbranchcache": False}}
81
83
82 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
84 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
83 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
85 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
84
86
85 @attr.s
87 @attr.s
86 class bundlespec(object):
88 class bundlespec(object):
87 compression = attr.ib()
89 compression = attr.ib()
88 wirecompression = attr.ib()
90 wirecompression = attr.ib()
89 version = attr.ib()
91 version = attr.ib()
90 wireversion = attr.ib()
92 wireversion = attr.ib()
91 params = attr.ib()
93 params = attr.ib()
92 contentopts = attr.ib()
94 contentopts = attr.ib()
93
95
94 def parsebundlespec(repo, spec, strict=True):
96 def parsebundlespec(repo, spec, strict=True):
95 """Parse a bundle string specification into parts.
97 """Parse a bundle string specification into parts.
96
98
97 Bundle specifications denote a well-defined bundle/exchange format.
99 Bundle specifications denote a well-defined bundle/exchange format.
98 The content of a given specification should not change over time in
100 The content of a given specification should not change over time in
99 order to ensure that bundles produced by a newer version of Mercurial are
101 order to ensure that bundles produced by a newer version of Mercurial are
100 readable from an older version.
102 readable from an older version.
101
103
102 The string currently has the form:
104 The string currently has the form:
103
105
104 <compression>-<type>[;<parameter0>[;<parameter1>]]
106 <compression>-<type>[;<parameter0>[;<parameter1>]]
105
107
106 Where <compression> is one of the supported compression formats
108 Where <compression> is one of the supported compression formats
107 and <type> is (currently) a version string. A ";" can follow the type and
109 and <type> is (currently) a version string. A ";" can follow the type and
108 all text afterwards is interpreted as URI encoded, ";" delimited key=value
110 all text afterwards is interpreted as URI encoded, ";" delimited key=value
109 pairs.
111 pairs.
110
112
111 If ``strict`` is True (the default) <compression> is required. Otherwise,
113 If ``strict`` is True (the default) <compression> is required. Otherwise,
112 it is optional.
114 it is optional.
113
115
114 Returns a bundlespec object of (compression, version, parameters).
116 Returns a bundlespec object of (compression, version, parameters).
115 Compression will be ``None`` if not in strict mode and a compression isn't
117 Compression will be ``None`` if not in strict mode and a compression isn't
116 defined.
118 defined.
117
119
118 An ``InvalidBundleSpecification`` is raised when the specification is
120 An ``InvalidBundleSpecification`` is raised when the specification is
119 not syntactically well formed.
121 not syntactically well formed.
120
122
121 An ``UnsupportedBundleSpecification`` is raised when the compression or
123 An ``UnsupportedBundleSpecification`` is raised when the compression or
122 bundle type/version is not recognized.
124 bundle type/version is not recognized.
123
125
124 Note: this function will likely eventually return a more complex data
126 Note: this function will likely eventually return a more complex data
125 structure, including bundle2 part information.
127 structure, including bundle2 part information.
126 """
128 """
127 def parseparams(s):
129 def parseparams(s):
128 if ';' not in s:
130 if ';' not in s:
129 return s, {}
131 return s, {}
130
132
131 params = {}
133 params = {}
132 version, paramstr = s.split(';', 1)
134 version, paramstr = s.split(';', 1)
133
135
134 for p in paramstr.split(';'):
136 for p in paramstr.split(';'):
135 if '=' not in p:
137 if '=' not in p:
136 raise error.InvalidBundleSpecification(
138 raise error.InvalidBundleSpecification(
137 _('invalid bundle specification: '
139 _('invalid bundle specification: '
138 'missing "=" in parameter: %s') % p)
140 'missing "=" in parameter: %s') % p)
139
141
140 key, value = p.split('=', 1)
142 key, value = p.split('=', 1)
141 key = urlreq.unquote(key)
143 key = urlreq.unquote(key)
142 value = urlreq.unquote(value)
144 value = urlreq.unquote(value)
143 params[key] = value
145 params[key] = value
144
146
145 return version, params
147 return version, params
146
148
147
149
148 if strict and '-' not in spec:
150 if strict and '-' not in spec:
149 raise error.InvalidBundleSpecification(
151 raise error.InvalidBundleSpecification(
150 _('invalid bundle specification; '
152 _('invalid bundle specification; '
151 'must be prefixed with compression: %s') % spec)
153 'must be prefixed with compression: %s') % spec)
152
154
153 if '-' in spec:
155 if '-' in spec:
154 compression, version = spec.split('-', 1)
156 compression, version = spec.split('-', 1)
155
157
156 if compression not in util.compengines.supportedbundlenames:
158 if compression not in util.compengines.supportedbundlenames:
157 raise error.UnsupportedBundleSpecification(
159 raise error.UnsupportedBundleSpecification(
158 _('%s compression is not supported') % compression)
160 _('%s compression is not supported') % compression)
159
161
160 version, params = parseparams(version)
162 version, params = parseparams(version)
161
163
162 if version not in _bundlespeccgversions:
164 if version not in _bundlespeccgversions:
163 raise error.UnsupportedBundleSpecification(
165 raise error.UnsupportedBundleSpecification(
164 _('%s is not a recognized bundle version') % version)
166 _('%s is not a recognized bundle version') % version)
165 else:
167 else:
166 # Value could be just the compression or just the version, in which
168 # Value could be just the compression or just the version, in which
167 # case some defaults are assumed (but only when not in strict mode).
169 # case some defaults are assumed (but only when not in strict mode).
168 assert not strict
170 assert not strict
169
171
170 spec, params = parseparams(spec)
172 spec, params = parseparams(spec)
171
173
172 if spec in util.compengines.supportedbundlenames:
174 if spec in util.compengines.supportedbundlenames:
173 compression = spec
175 compression = spec
174 version = 'v1'
176 version = 'v1'
175 # Generaldelta repos require v2.
177 # Generaldelta repos require v2.
176 if 'generaldelta' in repo.requirements:
178 if 'generaldelta' in repo.requirements:
177 version = 'v2'
179 version = 'v2'
178 # Modern compression engines require v2.
180 # Modern compression engines require v2.
179 if compression not in _bundlespecv1compengines:
181 if compression not in _bundlespecv1compengines:
180 version = 'v2'
182 version = 'v2'
181 elif spec in _bundlespeccgversions:
183 elif spec in _bundlespeccgversions:
182 if spec == 'packed1':
184 if spec == 'packed1':
183 compression = 'none'
185 compression = 'none'
184 else:
186 else:
185 compression = 'bzip2'
187 compression = 'bzip2'
186 version = spec
188 version = spec
187 else:
189 else:
188 raise error.UnsupportedBundleSpecification(
190 raise error.UnsupportedBundleSpecification(
189 _('%s is not a recognized bundle specification') % spec)
191 _('%s is not a recognized bundle specification') % spec)
190
192
191 # Bundle version 1 only supports a known set of compression engines.
193 # Bundle version 1 only supports a known set of compression engines.
192 if version == 'v1' and compression not in _bundlespecv1compengines:
194 if version == 'v1' and compression not in _bundlespecv1compengines:
193 raise error.UnsupportedBundleSpecification(
195 raise error.UnsupportedBundleSpecification(
194 _('compression engine %s is not supported on v1 bundles') %
196 _('compression engine %s is not supported on v1 bundles') %
195 compression)
197 compression)
196
198
197 # The specification for packed1 can optionally declare the data formats
199 # The specification for packed1 can optionally declare the data formats
198 # required to apply it. If we see this metadata, compare against what the
200 # required to apply it. If we see this metadata, compare against what the
199 # repo supports and error if the bundle isn't compatible.
201 # repo supports and error if the bundle isn't compatible.
200 if version == 'packed1' and 'requirements' in params:
202 if version == 'packed1' and 'requirements' in params:
201 requirements = set(params['requirements'].split(','))
203 requirements = set(params['requirements'].split(','))
202 missingreqs = requirements - repo.supportedformats
204 missingreqs = requirements - repo.supportedformats
203 if missingreqs:
205 if missingreqs:
204 raise error.UnsupportedBundleSpecification(
206 raise error.UnsupportedBundleSpecification(
205 _('missing support for repository features: %s') %
207 _('missing support for repository features: %s') %
206 ', '.join(sorted(missingreqs)))
208 ', '.join(sorted(missingreqs)))
207
209
208 # Compute contentopts based on the version
210 # Compute contentopts based on the version
209 contentopts = _bundlespeccontentopts.get(version, {}).copy()
211 contentopts = _bundlespeccontentopts.get(version, {}).copy()
210
212
211 # Process the variants
213 # Process the variants
212 if "stream" in params and params["stream"] == "v2":
214 if "stream" in params and params["stream"] == "v2":
213 variant = _bundlespecvariants["streamv2"]
215 variant = _bundlespecvariants["streamv2"]
214 contentopts.update(variant)
216 contentopts.update(variant)
215
217
216 engine = util.compengines.forbundlename(compression)
218 engine = util.compengines.forbundlename(compression)
217 compression, wirecompression = engine.bundletype()
219 compression, wirecompression = engine.bundletype()
218 wireversion = _bundlespeccgversions[version]
220 wireversion = _bundlespeccgversions[version]
219
221
220 return bundlespec(compression, wirecompression, version, wireversion,
222 return bundlespec(compression, wirecompression, version, wireversion,
221 params, contentopts)
223 params, contentopts)
222
224
223 def readbundle(ui, fh, fname, vfs=None):
225 def readbundle(ui, fh, fname, vfs=None):
224 header = changegroup.readexactly(fh, 4)
226 header = changegroup.readexactly(fh, 4)
225
227
226 alg = None
228 alg = None
227 if not fname:
229 if not fname:
228 fname = "stream"
230 fname = "stream"
229 if not header.startswith('HG') and header.startswith('\0'):
231 if not header.startswith('HG') and header.startswith('\0'):
230 fh = changegroup.headerlessfixup(fh, header)
232 fh = changegroup.headerlessfixup(fh, header)
231 header = "HG10"
233 header = "HG10"
232 alg = 'UN'
234 alg = 'UN'
233 elif vfs:
235 elif vfs:
234 fname = vfs.join(fname)
236 fname = vfs.join(fname)
235
237
236 magic, version = header[0:2], header[2:4]
238 magic, version = header[0:2], header[2:4]
237
239
238 if magic != 'HG':
240 if magic != 'HG':
239 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
241 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
240 if version == '10':
242 if version == '10':
241 if alg is None:
243 if alg is None:
242 alg = changegroup.readexactly(fh, 2)
244 alg = changegroup.readexactly(fh, 2)
243 return changegroup.cg1unpacker(fh, alg)
245 return changegroup.cg1unpacker(fh, alg)
244 elif version.startswith('2'):
246 elif version.startswith('2'):
245 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
247 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
246 elif version == 'S1':
248 elif version == 'S1':
247 return streamclone.streamcloneapplier(fh)
249 return streamclone.streamcloneapplier(fh)
248 else:
250 else:
249 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
251 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
250
252
251 def getbundlespec(ui, fh):
253 def getbundlespec(ui, fh):
252 """Infer the bundlespec from a bundle file handle.
254 """Infer the bundlespec from a bundle file handle.
253
255
    This function seeks the input file handle and does not restore the
    original seek position.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            compalg = b.params['Compression']
            comp = speccompression(compalg)
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s')
                                  % compalg)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

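# Editorial sketch (not in the original module): driving the helper above to
# report the bundlespec of an on-disk bundle file. The name getbundlespec is
# an assumption; its def line sits above this excerpt.
def _examplebundlespec(ui, path):
    with open(path, 'rb') as fh:
        # the helper seeks fh and does not restore the position
        return getbundlespec(ui, fh)  # e.g. 'bzip2-v2' or 'none-packed1;...'
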
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

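# Editorial sketch (not in the original module): driving the helper above.
# With common=[nullid] every ancestor of the chosen heads counts as outgoing,
# which is the "push everything" shape used elsewhere in this file.
def _exampleoutgoingall(repo):
    # heads=None makes _computeoutgoing fall back to repo.changelog.heads()
    return _computeoutgoing(repo, heads=None, common=[nullid])
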
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config option is to allow developers to choose the
    # bundle version used during exchange. This is especially handy during
    # tests. The value is a list of bundle versions to pick from; the highest
    # supported version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

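# Editorial sketch (not in the original module): flipping the developer knob
# read above, as a test might. Listing only 'bundle1' makes _forcebundle1()
# return True regardless of the remote's capabilities; 'op' is a push/pull
# operation object carrying repo and remote.
def _exampleforcebundle1(op):
    op.repo.ui.setconfig('devel', 'legacy.exchange', 'bundle1')
    assert _forcebundle1(op)
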
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have already been performed via bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map {pushkey partid -> callback handling failure}
        # used to handle exceptions from mandatory pushkey part failures
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target revs to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of messages used when pushing bookmarks
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return a pushoperation object whose ``cgresult``
    attribute carries the changegroup push result:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop

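# Editorial sketch (not in the original module): a programmatic push of all
# local heads to a destination path, roughly what 'hg push' boils down to.
# hg is imported locally to dodge the module-level import cycle.
def _examplepush(ui, repo, dest):
    from mercurial import hg
    other = hg.peer(ui, {}, dest)  # open a peer for 'dest'
    pushop = push(repo, other, force=False, revs=None)
    return pushop.cgresult         # see push()'s docstring above
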
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

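# Editorial sketch (not in the original module): how an extension would
# register an extra discovery step with the decorator above. The step name
# 'mydata' is hypothetical; it is appended after the built-in steps that were
# already registered at import time.
@pushdiscovery('mydata')
def _examplediscoverymydata(pushop):
    # earlier steps have populated pushop.outgoing by the time this runs
    pushop.ui.debug('custom discovery step ran\n')
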
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changeset push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # then we may be in the issue 3781 case!
        # We skip the courtesy phase synchronisation that would otherwise
        # publish changesets on the remote that may still be draft locally.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on the remote but public here.
    # XXX Beware that the revset breaks if droots are not strictly
    # XXX roots; we may want to ensure they are, but it is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # should not be necessary for publishing servers, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # A very naive computation that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmark pushes to perform from the comparison with the
    remote bookmarks

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

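# Editorial sketch (not in the original module): the shape consumed by
# _processcompared() above. bookmod.comparebookmarks() returns eight lists of
# (name, source-id, dest-id) triples; after hexifycompbookmarks() the ids are
# hex strings (or None for a missing side). Names and ids below are made up.
_examplecomp = (
    [('feature', '11' * 20, None)],    # addsrc: local-only -> export
    [('stale', None, '22' * 20)],      # adddst: remote-only -> maybe delete
    [], [], [], [], [],                # advsrc, advdst, diverge, differ, invalid
    [('main', '33' * 20, '33' * 20)],  # same: identical, not reported
)
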
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, skip the iteration
        if unfi.obsstore:
            # these messages are assigned here for 80-char limit reasons
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missing heads will be obsolete or
            # unstable, so checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

        discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

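# Editorial sketch (not in the original module): an extension contributing
# its own bundle2 part on push. The step and part names are hypothetical;
# idx=None appends the step after all built-in generators, and the _pushing()
# guard (defined further below) keeps the sketch inert on no-op pushes.
@b2partsgenerator('exampledata')
def _examplepartgen(pushop, bundler):
    if 'exampledata' in pushop.stepsdone or not _pushing(pushop):
        return
    pushop.stepsdone.add('exampledata')
    # advisory part: servers that do not understand it simply ignore it
    bundler.newpart('example:data', data='payload', mandatory=False)
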
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

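# Editorial sketch (not in the original module): the shape handed to
# phases.binaryencode() above, assuming the classic public/draft/secret trio
# in phases.allphases. Index 0 (public) lists the heads that become public
# once the push succeeds; 'somenode' is a placeholder binary node id.
def _examplephaseupdates(somenode):
    updates = [[], [], []]   # public, draft, secret
    updates[0].append(somenode)
    return phases.binaryencode(updates)
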
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

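# Editorial note (not in the original module): the three cases above, spelled
# out with made-up hex ids.
assert _bmaction('', 'a' * 40) == 'export'        # bookmark is new remotely
assert _bmaction('a' * 40, '') == 'delete'        # bookmark removed
assert _bmaction('a' * 40, 'b' * 40) == 'update'  # bookmark moved
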
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

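# Editorial sketch (not in the original module): where pushop.pushvars comes
# from. 'hg push --pushvars KEY=VALUE' collects raw KEY=VALUE strings and
# threads them through opargs into pushoperation(); the generator above then
# ships each one as an advisory parameter of a 'pushvars' part. The variable
# names below are made up.
def _examplepushvars(repo, remote):
    opargs = {'pushvars': ['DEBUG=1', 'REASON=hotfix']}
    return push(repo, remote, opargs=opargs)
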
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases            # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None means "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible,
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset,
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

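# Illustrative lifecycle sketch (editorial, not part of the original source;
# names mirror this module):
#
#   pullop = pulloperation(repo, remote, heads=None, force=False)
#   # discovery then fills in pullop.common / pullop.fetch / pullop.rheads,
#   # and each transfer step records itself in pullop.stepsdone so that it
#   # runs at most once.
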
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

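# Usage sketch (editorial): because transactionmanager subclasses
# util.transactional, it can drive a with-statement, as pull() does below:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily, hook args set up
#       ...                           # closed or released on exit
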
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

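# Example (editorial): fetching the remote bookmark pushkey namespace, as the
# bundle1 bookmark discovery step below does:
#
#   books = listkeys(remote, 'bookmarks')   # {name: hex node}
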
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if changegroup.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop

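# Caller sketch (editorial; assumes an existing local repo and a reachable
# remote URL):
#
#   from mercurial import hg
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = pull(repo, other)          # full pull
#   if pullop.cgresult:
#       repo.ui.status('new changesets pulled\n')
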
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

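# Registration sketch for a hypothetical new step (editorial; the names are
# illustrative, not part of mercurial):
#
#   @pulldiscovery('my-extra-data')
#   def _pulldiscoverymyextradata(pullop):
#       pullop.myextradata = listkeys(pullop.remote, 'myextradata')
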
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)

@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situation. We do not perform discovery on an unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull parameters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record['node']
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

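# Extension sketch (editorial): the usual way to hook this point is
# extensions.wrapfunction; the names below are hypothetical:
#
#   def _myextraprepare(orig, pullop, kwargs):
#       orig(pullop, kwargs)
#       kwargs['mydata'] = True   # ask the server for an extra part
#   extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                           _myextraprepare)
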
def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

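# Shape of the result (editorial): a set like
#   {'HG20', 'bundle2=<url-quoted capabilities blob>'}
# i.e. bundle2 capabilities smuggled through the bundle10-era bundlecaps
# channel.
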
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

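# Registration sketch for a hypothetical part generator (editorial; names are
# illustrative, not part of mercurial):
#
#   @getbundle2partsgenerator('mydata')
#   def _getbundlemydatapart(bundler, repo, source, bundlecaps=None,
#                            b2caps=None, **kwargs):
#       bundler.newpart('mydata', data=b'...')
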
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

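# Example (editorial): bundle2requested(['HG20']) -> True,
# bundle2requested(None) -> False.
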
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

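# Editor's note: a usage sketch (not upstream code) of getbundlechunks().
# The 'HG20' bundlecaps entry requests a bundle2 stream; the output path is
# arbitrary and the call is illustrative rather than normative.
def _examplewritebundle(repo):
    info, chunks = getbundlechunks(repo, 'bundle', heads=None, common=None,
                                   bundlecaps={'HG20'})
    with open('example.hg', 'wb') as fh:
        for chunk in chunks:
            fh.write(chunk)
    return info
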
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

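# Editor's note: the version negotiation above in miniature. Given the
# client's advertised changegroup versions and the server's supported
# outgoing versions, the highest common version wins (values illustrative):
#
#   client:  ['01', '02', '03']
#   server:  {'01', '02'}
#   common:  ['01', '02']  -> version = max(...) = '02'
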
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

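# Editor's note: illustration of the phasemapping layout handed to
# phases.binaryencode() above - one sorted list of head nodes per phase
# index; node values here are placeholders:
#
#   phasemapping = [
#       [node_a, node_b],  # phases.public (0)
#       [node_c],          # phases.draft (1)
#       [],                # phases.secret (2)
#   ]
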
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

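# Editor's note: a decoding sketch for the payload layout documented above.
# It assumes a big-endian '>III' per-branch header (name length, open-head
# count, closed-head count) and 20-byte binary nodes, matching what
# bundle2.addpartrevbranchcache emits; treat it as illustrative, not
# normative.
def _exampledecoderevbranchcache(data):
    import struct  # local import; this module may not import struct itself
    rbcheader = struct.Struct('>III')
    offset = 0
    while offset < len(data):
        namelen, nopen, nclosed = rbcheader.unpack_from(data, offset)
        offset += rbcheader.size
        branch = data[offset:offset + namelen]
        offset += namelen
        openheads = [data[offset + i * 20:offset + (i + 1) * 20]
                     for i in range(nopen)]
        offset += nopen * 20
        closedheads = [data[offset + i * 20:offset + (i + 1) * 20]
                       for i in range(nclosed)]
        offset += nclosed * 20
        yield branch, openheads, closedheads
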
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

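# Editor's note: how a client would compute the 'hashed' form accepted by
# check_heads() above - a sketch mirroring the digest computed there
# (remote_heads is a hypothetical list of 20-byte head nodes):
#
#   heads_hash = hashlib.sha1(''.join(sorted(remote_heads))).digest()
#   their_heads = ['hashed', heads_hash]
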
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

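# Editor's note: illustration only - how a server-side caller might hand a
# received bundle to unbundle(). 'force' skips the heads race check; the
# source and url values are hypothetical:
#
#   r = unbundle(repo, cg, ['force'], 'serve', 'remote:https://example.com')
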
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

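# Editor's note: an illustrative manifest as parseclonebundlesmanifest()
# expects it - one URL per line followed by percent-encoded key=value
# attributes (the URL below is made up):
#
#   https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would parse to:
#
#   [{'URL': 'https://cdn.example.com/full.hg',
#     'BUNDLESPEC': 'gzip-v2', 'REQUIRESNI': 'true',
#     'COMPRESSION': 'gzip', 'VERSION': 'v2'}]
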
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and
            bundlespec.wireversion == '02' and
            bundlespec.contentopts.get('streamv2')):
        return True

    return False

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

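# Editor's note: filtering illustration with made-up entries. With
# streamclonerequested=False on a client lacking SNI support, an entry
# carrying REQUIRESNI=true is dropped while a plain BUNDLESPEC entry
# survives:
#
#   [{'URL': ..., 'BUNDLESPEC': 'gzip-v2'},   # kept
#    {'URL': ..., 'REQUIRESNI': 'true'}]      # filtered out
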
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

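# Editor's note: illustration of the preference sort above. With
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# prefers becomes [['COMPRESSION', 'zstd'], ['VERSION', 'v2']], so entries
# whose COMPRESSION attribute equals 'zstd' sort ahead of the rest, ties
# broken by VERSION=v2, then by manifest order.
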
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False