narrow: send specs as bundle2 data instead of param (issue5952) (issue6019)...
Pulkit Goyal
r42307:8a37c9b6 default draft
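
Editor's note: this commit teaches the 'narrow:spec' bundle2 part handler to read the include/exclude patterns from the part's data payload (two newline-separated pattern lists joined by a single NUL byte) while still honoring the part parameters that older servers send. Below is a minimal round-trip sketch of that payload layout, assuming the NUL-separated framing the new handler expects; encodespecs and decodespecs are illustrative names, not Mercurial APIs.

def encodespecs(includepats, excludepats):
    # Two newline-separated pattern lists joined by one NUL byte, e.g.
    # b'path:foo\npath:bar' + b'\0' + b'path:baz'. sorted() is only for
    # deterministic output in this sketch.
    return (b'\n'.join(sorted(includepats)) + b'\0' +
            b'\n'.join(sorted(excludepats)))

def decodespecs(data):
    # Mirrors the parsing added to _handlechangespec_2() below.
    inc, exc = data.split(b'\0')
    includepats = set(inc.splitlines()) if inc else set()
    excludepats = set(exc.splitlines()) if exc else set()
    return includepats, excludepats

assert decodespecs(encodespecs({b'path:foo'}, set())) == ({b'path:foo'}, set())
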
@@ -1,280 +1,289 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import struct

 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     nullid,
 )
 from mercurial import (
     bundle2,
     changegroup,
     error,
     exchange,
     localrepo,
     narrowspec,
     repair,
     repository,
     util,
     wireprototypes,
 )
 from mercurial.utils import (
     stringutil,
 )

 _NARROWACL_SECTION = 'narrowhgacl'
 _CHANGESPECPART = 'narrow:changespec'
 _SPECPART = 'narrow:spec'
 _SPECPART_INCLUDE = 'include'
 _SPECPART_EXCLUDE = 'exclude'
 _KILLNODESIGNAL = 'KILL'
 _DONESIGNAL = 'DONE'
 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)

 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
     assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')

     cgversions = b2caps.get('changegroup')
     if cgversions:  # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     else:
         raise ValueError(_("server does not advertise changegroup version,"
                            " can't negotiate support for ellipsis nodes"))

     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)

     depth = kwargs.get(r'depth', None)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_('depth must be positive, got %d') % depth)

     heads = set(heads or repo.heads())
     common = set(common or [nullid])
     oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
     oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
     known = {bin(n) for n in kwargs.get(r'known', [])}
     if known and (oldinclude != include or oldexclude != exclude):
         # Steps:
         # 1. Send kill for "$known & ::common"
         #
         # 2. Send changegroup for ::common
         #
         # 3. Proceed.
         #
         # In the future, we can send kills for only the specific
         # nodes we know should go away or change shape, and then
         # send a data stream that tells the client something like this:
         #
         # a) apply this changegroup
         # b) apply nodes XXX, YYY, ZZZ that you already have
         # c) goto a
         #
         # until they've built up the full new state.
         # Convert to revnums and intersect with "common". The client should
         # have made it a subset of "common" already, but let's be safe.
         known = set(repo.revs("%ln & ::%ln", known, common))
         # TODO: we could send only roots() of this set, and the
         # list of nodes in common, and the client could work out
         # what to strip, instead of us explicitly sending every
         # single node.
         deadrevs = known
         def genkills():
             for r in deadrevs:
                 yield _KILLNODESIGNAL
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
         newvisit, newfull, newellipsis = exchange._computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
             packer = changegroup.getbundler(version, repo,
                                             matcher=newmatch,
                                             ellipses=True,
                                             shallow=depth is not None,
                                             ellipsisroots=newellipsis,
                                             fullnodes=newfull)
             cgdata = packer.generate(common, newvisit, False, 'narrow_widen')

             part = bundler.newpart('changegroup', data=cgdata)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')

     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)

     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         packer = changegroup.getbundler(version, repo,
                                         matcher=newmatch,
                                         ellipses=True,
                                         shallow=depth is not None,
                                         ellipsisroots=ellipsisroots,
                                         fullnodes=relevant_nodes)
         cgdata = packer.generate(common, visitnodes, False, 'narrow_widen')

         part = bundler.newpart('changegroup', data=cgdata)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')

 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
+    data = inpart.read()
+    # old servers don't send includes and excludes using bundle2 data, they use
+    # bundle2 parameters instead.
+    if data:
+        inc, exc = data.split('\0')
+        if inc:
+            includepats |= set(inc.splitlines())
+        if exc:
+            excludepats |= set(exc.splitlines())
     narrowspec.validatepatterns(includepats)
     narrowspec.validatepatterns(excludepats)

     if not repository.NARROW_REQUIREMENT in op.repo.requirements:
         op.repo.requirements.add(repository.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.setnarrowpats(includepats, excludepats)
     narrowspec.copytoworkingcopy(op.repo)

 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog

     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()

     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _('unexpected changespec node chunk type: %s') % chunksignal)
         chunksignal = changegroup.readexactly(inpart, 4)

     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         op._bookmarksbackup = repo._bookmarks
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
         localrepo.localrepository._bookmarks.set(repo, dummybmstore())
         chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                  topic='widen')
         if chgrpfile:
             op._widen_uninterr = repo.ui.uninterruptible()
             op._widen_uninterr.__enter__()
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()

 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui

     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs

     ui.note(_("adding branch\n"))
     f = vfs.open(chgrpfile, "rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         if not ui.verbose:
             # silence internal shuffling chatter
             ui.pushbuffer()
         if isinstance(gen, bundle2.unbundle20):
             with repo.transaction('strip') as tr:
                 bundle2.processbundle(repo, gen, lambda: tr)
         else:
             gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
         if not ui.verbose:
             ui.popbuffer()
     finally:
         f.close()

     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(_('error removing %s: %s\n') %
                         (undovfs.join(undofile), stringutil.forcebytestr(e)))

     # Remove partial backup only if there were no exceptions
     op._widen_uninterr.__exit__(None, None, None)
     vfs.unlink(chgrpfile)

 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS

     getbundleargs['narrow'] = 'boolean'
     getbundleargs['depth'] = 'plain'
     getbundleargs['oldincludepats'] = 'csv'
     getbundleargs['oldexcludepats'] = 'csv'
     getbundleargs['known'] = 'csv'

     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping['changegroup']
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             kwargs = exchange.applynarrowacl(repo, kwargs)

         if (kwargs.get(r'narrow', False) and
             repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
         if util.safehasattr(op, '_bookmarksbackup'):
             localrepo.localrepository._bookmarks.set(op.repo,
                                                      op._bookmarksbackup)
             del op._bookmarksbackup

     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping['changegroup'] = wrappedcghandler
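
As context for the 'narrow:changespec' part handled above: the payload is a stream of 4-byte signals, where each KILL signal is followed by a raw 20-byte changeset id and the stream ends with a DONE signal (see genkills() and _handlechangespec()). A self-contained sketch of that framing, with io.BytesIO standing in for the bundle2 part; framekills and readkills are illustrative names, not Mercurial APIs.

import io

_KILL, _DONE = b'KILL', b'DONE'

def framekills(nodes):
    # Emit the wire framing for a sequence of 20-byte nodes to kill.
    for node in nodes:
        assert len(node) == 20
        yield _KILL
        yield node
    yield _DONE

def readkills(stream):
    # Parse the framing back into the list of nodes to strip.
    kills = []
    signal = stream.read(4)
    while signal != _DONE:
        if signal != _KILL:
            raise ValueError('unexpected changespec chunk type: %r' % signal)
        kills.append(stream.read(20))
        signal = stream.read(4)
    return kills

node = b'\x11' * 20
assert readkills(io.BytesIO(b''.join(framekills([node])))) == [node]
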
@@ -1,2698 +1,2700 @@
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 bin,
15 bin,
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from .thirdparty import (
20 from .thirdparty import (
21 attr,
21 attr,
22 )
22 )
23 from . import (
23 from . import (
24 bookmarks as bookmod,
24 bookmarks as bookmod,
25 bundle2,
25 bundle2,
26 changegroup,
26 changegroup,
27 discovery,
27 discovery,
28 error,
28 error,
29 exchangev2,
29 exchangev2,
30 lock as lockmod,
30 lock as lockmod,
31 logexchange,
31 logexchange,
32 narrowspec,
32 narrowspec,
33 obsolete,
33 obsolete,
34 phases,
34 phases,
35 pushkey,
35 pushkey,
36 pycompat,
36 pycompat,
37 repository,
37 repository,
38 scmutil,
38 scmutil,
39 sslutil,
39 sslutil,
40 streamclone,
40 streamclone,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 wireprototypes,
43 wireprototypes,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 urlerr = util.urlerr
49 urlerr = util.urlerr
50 urlreq = util.urlreq
50 urlreq = util.urlreq
51
51
52 _NARROWACL_SECTION = 'narrowhgacl'
52 _NARROWACL_SECTION = 'narrowhgacl'
53
53
54 # Maps bundle version human names to changegroup versions.
54 # Maps bundle version human names to changegroup versions.
55 _bundlespeccgversions = {'v1': '01',
55 _bundlespeccgversions = {'v1': '01',
56 'v2': '02',
56 'v2': '02',
57 'packed1': 's1',
57 'packed1': 's1',
58 'bundle2': '02', #legacy
58 'bundle2': '02', #legacy
59 }
59 }
60
60
61 # Maps bundle version with content opts to choose which part to bundle
61 # Maps bundle version with content opts to choose which part to bundle
62 _bundlespeccontentopts = {
62 _bundlespeccontentopts = {
63 'v1': {
63 'v1': {
64 'changegroup': True,
64 'changegroup': True,
65 'cg.version': '01',
65 'cg.version': '01',
66 'obsolescence': False,
66 'obsolescence': False,
67 'phases': False,
67 'phases': False,
68 'tagsfnodescache': False,
68 'tagsfnodescache': False,
69 'revbranchcache': False
69 'revbranchcache': False
70 },
70 },
71 'v2': {
71 'v2': {
72 'changegroup': True,
72 'changegroup': True,
73 'cg.version': '02',
73 'cg.version': '02',
74 'obsolescence': False,
74 'obsolescence': False,
75 'phases': False,
75 'phases': False,
76 'tagsfnodescache': True,
76 'tagsfnodescache': True,
77 'revbranchcache': True
77 'revbranchcache': True
78 },
78 },
79 'packed1' : {
79 'packed1' : {
80 'cg.version': 's1'
80 'cg.version': 's1'
81 }
81 }
82 }
82 }
83 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
83 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
84
84
85 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
85 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
86 "tagsfnodescache": False,
86 "tagsfnodescache": False,
87 "revbranchcache": False}}
87 "revbranchcache": False}}
88
88
89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
91
91
92 @attr.s
92 @attr.s
93 class bundlespec(object):
93 class bundlespec(object):
94 compression = attr.ib()
94 compression = attr.ib()
95 wirecompression = attr.ib()
95 wirecompression = attr.ib()
96 version = attr.ib()
96 version = attr.ib()
97 wireversion = attr.ib()
97 wireversion = attr.ib()
98 params = attr.ib()
98 params = attr.ib()
99 contentopts = attr.ib()
99 contentopts = attr.ib()
100
100
101 def parsebundlespec(repo, spec, strict=True):
101 def parsebundlespec(repo, spec, strict=True):
102 """Parse a bundle string specification into parts.
102 """Parse a bundle string specification into parts.
103
103
104 Bundle specifications denote a well-defined bundle/exchange format.
104 Bundle specifications denote a well-defined bundle/exchange format.
105 The content of a given specification should not change over time in
105 The content of a given specification should not change over time in
106 order to ensure that bundles produced by a newer version of Mercurial are
106 order to ensure that bundles produced by a newer version of Mercurial are
107 readable from an older version.
107 readable from an older version.
108
108
109 The string currently has the form:
109 The string currently has the form:
110
110
111 <compression>-<type>[;<parameter0>[;<parameter1>]]
111 <compression>-<type>[;<parameter0>[;<parameter1>]]
112
112
113 Where <compression> is one of the supported compression formats
113 Where <compression> is one of the supported compression formats
114 and <type> is (currently) a version string. A ";" can follow the type and
114 and <type> is (currently) a version string. A ";" can follow the type and
115 all text afterwards is interpreted as URI encoded, ";" delimited key=value
115 all text afterwards is interpreted as URI encoded, ";" delimited key=value
116 pairs.
116 pairs.
117
117
118 If ``strict`` is True (the default) <compression> is required. Otherwise,
118 If ``strict`` is True (the default) <compression> is required. Otherwise,
119 it is optional.
119 it is optional.
120
120
121 Returns a bundlespec object of (compression, version, parameters).
121 Returns a bundlespec object of (compression, version, parameters).
122 Compression will be ``None`` if not in strict mode and a compression isn't
122 Compression will be ``None`` if not in strict mode and a compression isn't
123 defined.
123 defined.
124
124
125 An ``InvalidBundleSpecification`` is raised when the specification is
125 An ``InvalidBundleSpecification`` is raised when the specification is
126 not syntactically well formed.
126 not syntactically well formed.
127
127
128 An ``UnsupportedBundleSpecification`` is raised when the compression or
128 An ``UnsupportedBundleSpecification`` is raised when the compression or
129 bundle type/version is not recognized.
129 bundle type/version is not recognized.
130
130
131 Note: this function will likely eventually return a more complex data
131 Note: this function will likely eventually return a more complex data
132 structure, including bundle2 part information.
132 structure, including bundle2 part information.
133 """
133 """
134 def parseparams(s):
134 def parseparams(s):
135 if ';' not in s:
135 if ';' not in s:
136 return s, {}
136 return s, {}
137
137
138 params = {}
138 params = {}
139 version, paramstr = s.split(';', 1)
139 version, paramstr = s.split(';', 1)
140
140
141 for p in paramstr.split(';'):
141 for p in paramstr.split(';'):
142 if '=' not in p:
142 if '=' not in p:
143 raise error.InvalidBundleSpecification(
143 raise error.InvalidBundleSpecification(
144 _('invalid bundle specification: '
144 _('invalid bundle specification: '
145 'missing "=" in parameter: %s') % p)
145 'missing "=" in parameter: %s') % p)
146
146
147 key, value = p.split('=', 1)
147 key, value = p.split('=', 1)
148 key = urlreq.unquote(key)
148 key = urlreq.unquote(key)
149 value = urlreq.unquote(value)
149 value = urlreq.unquote(value)
150 params[key] = value
150 params[key] = value
151
151
152 return version, params
152 return version, params
153
153
154
154
155 if strict and '-' not in spec:
155 if strict and '-' not in spec:
156 raise error.InvalidBundleSpecification(
156 raise error.InvalidBundleSpecification(
157 _('invalid bundle specification; '
157 _('invalid bundle specification; '
158 'must be prefixed with compression: %s') % spec)
158 'must be prefixed with compression: %s') % spec)
159
159
160 if '-' in spec:
160 if '-' in spec:
161 compression, version = spec.split('-', 1)
161 compression, version = spec.split('-', 1)
162
162
163 if compression not in util.compengines.supportedbundlenames:
163 if compression not in util.compengines.supportedbundlenames:
164 raise error.UnsupportedBundleSpecification(
164 raise error.UnsupportedBundleSpecification(
165 _('%s compression is not supported') % compression)
165 _('%s compression is not supported') % compression)
166
166
167 version, params = parseparams(version)
167 version, params = parseparams(version)
168
168
169 if version not in _bundlespeccgversions:
169 if version not in _bundlespeccgversions:
170 raise error.UnsupportedBundleSpecification(
170 raise error.UnsupportedBundleSpecification(
171 _('%s is not a recognized bundle version') % version)
171 _('%s is not a recognized bundle version') % version)
172 else:
172 else:
173 # Value could be just the compression or just the version, in which
173 # Value could be just the compression or just the version, in which
174 # case some defaults are assumed (but only when not in strict mode).
174 # case some defaults are assumed (but only when not in strict mode).
175 assert not strict
175 assert not strict
176
176
177 spec, params = parseparams(spec)
177 spec, params = parseparams(spec)
178
178
179 if spec in util.compengines.supportedbundlenames:
179 if spec in util.compengines.supportedbundlenames:
180 compression = spec
180 compression = spec
181 version = 'v1'
181 version = 'v1'
182 # Generaldelta repos require v2.
182 # Generaldelta repos require v2.
183 if 'generaldelta' in repo.requirements:
183 if 'generaldelta' in repo.requirements:
184 version = 'v2'
184 version = 'v2'
185 # Modern compression engines require v2.
185 # Modern compression engines require v2.
186 if compression not in _bundlespecv1compengines:
186 if compression not in _bundlespecv1compengines:
187 version = 'v2'
187 version = 'v2'
188 elif spec in _bundlespeccgversions:
188 elif spec in _bundlespeccgversions:
189 if spec == 'packed1':
189 if spec == 'packed1':
190 compression = 'none'
190 compression = 'none'
191 else:
191 else:
192 compression = 'bzip2'
192 compression = 'bzip2'
193 version = spec
193 version = spec
194 else:
194 else:
195 raise error.UnsupportedBundleSpecification(
195 raise error.UnsupportedBundleSpecification(
196 _('%s is not a recognized bundle specification') % spec)
196 _('%s is not a recognized bundle specification') % spec)
197
197
198 # Bundle version 1 only supports a known set of compression engines.
198 # Bundle version 1 only supports a known set of compression engines.
199 if version == 'v1' and compression not in _bundlespecv1compengines:
199 if version == 'v1' and compression not in _bundlespecv1compengines:
200 raise error.UnsupportedBundleSpecification(
200 raise error.UnsupportedBundleSpecification(
201 _('compression engine %s is not supported on v1 bundles') %
201 _('compression engine %s is not supported on v1 bundles') %
202 compression)
202 compression)
203
203
204 # The specification for packed1 can optionally declare the data formats
204 # The specification for packed1 can optionally declare the data formats
205 # required to apply it. If we see this metadata, compare against what the
205 # required to apply it. If we see this metadata, compare against what the
206 # repo supports and error if the bundle isn't compatible.
206 # repo supports and error if the bundle isn't compatible.
207 if version == 'packed1' and 'requirements' in params:
207 if version == 'packed1' and 'requirements' in params:
208 requirements = set(params['requirements'].split(','))
208 requirements = set(params['requirements'].split(','))
209 missingreqs = requirements - repo.supportedformats
209 missingreqs = requirements - repo.supportedformats
210 if missingreqs:
210 if missingreqs:
211 raise error.UnsupportedBundleSpecification(
211 raise error.UnsupportedBundleSpecification(
212 _('missing support for repository features: %s') %
212 _('missing support for repository features: %s') %
213 ', '.join(sorted(missingreqs)))
213 ', '.join(sorted(missingreqs)))
214
214
215 # Compute contentopts based on the version
215 # Compute contentopts based on the version
216 contentopts = _bundlespeccontentopts.get(version, {}).copy()
216 contentopts = _bundlespeccontentopts.get(version, {}).copy()
217
217
218 # Process the variants
218 # Process the variants
219 if "stream" in params and params["stream"] == "v2":
219 if "stream" in params and params["stream"] == "v2":
220 variant = _bundlespecvariants["streamv2"]
220 variant = _bundlespecvariants["streamv2"]
221 contentopts.update(variant)
221 contentopts.update(variant)
222
222
223 engine = util.compengines.forbundlename(compression)
223 engine = util.compengines.forbundlename(compression)
224 compression, wirecompression = engine.bundletype()
224 compression, wirecompression = engine.bundletype()
225 wireversion = _bundlespeccgversions[version]
225 wireversion = _bundlespeccgversions[version]
226
226
227 return bundlespec(compression, wirecompression, version, wireversion,
227 return bundlespec(compression, wirecompression, version, wireversion,
228 params, contentopts)
228 params, contentopts)
229
229
230 def readbundle(ui, fh, fname, vfs=None):
230 def readbundle(ui, fh, fname, vfs=None):
231 header = changegroup.readexactly(fh, 4)
231 header = changegroup.readexactly(fh, 4)
232
232
233 alg = None
233 alg = None
234 if not fname:
234 if not fname:
235 fname = "stream"
235 fname = "stream"
236 if not header.startswith('HG') and header.startswith('\0'):
236 if not header.startswith('HG') and header.startswith('\0'):
237 fh = changegroup.headerlessfixup(fh, header)
237 fh = changegroup.headerlessfixup(fh, header)
238 header = "HG10"
238 header = "HG10"
239 alg = 'UN'
239 alg = 'UN'
240 elif vfs:
240 elif vfs:
241 fname = vfs.join(fname)
241 fname = vfs.join(fname)
242
242
243 magic, version = header[0:2], header[2:4]
243 magic, version = header[0:2], header[2:4]
244
244
245 if magic != 'HG':
245 if magic != 'HG':
246 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
246 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
247 if version == '10':
247 if version == '10':
248 if alg is None:
248 if alg is None:
249 alg = changegroup.readexactly(fh, 2)
249 alg = changegroup.readexactly(fh, 2)
250 return changegroup.cg1unpacker(fh, alg)
250 return changegroup.cg1unpacker(fh, alg)
251 elif version.startswith('2'):
251 elif version.startswith('2'):
252 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
252 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
253 elif version == 'S1':
253 elif version == 'S1':
254 return streamclone.streamcloneapplier(fh)
254 return streamclone.streamcloneapplier(fh)
255 else:
255 else:
256 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
256 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
257
257
258 def getbundlespec(ui, fh):
258 def getbundlespec(ui, fh):
259 """Infer the bundlespec from a bundle file handle.
259 """Infer the bundlespec from a bundle file handle.
260
260
261 The input file handle is seeked and the original seek position is not
261 The input file handle is seeked and the original seek position is not
262 restored.
262 restored.
263 """
263 """
264 def speccompression(alg):
264 def speccompression(alg):
265 try:
265 try:
266 return util.compengines.forbundletype(alg).bundletype()[0]
266 return util.compengines.forbundletype(alg).bundletype()[0]
267 except KeyError:
267 except KeyError:
268 return None
268 return None
269
269
270 b = readbundle(ui, fh, None)
270 b = readbundle(ui, fh, None)
271 if isinstance(b, changegroup.cg1unpacker):
271 if isinstance(b, changegroup.cg1unpacker):
272 alg = b._type
272 alg = b._type
273 if alg == '_truncatedBZ':
273 if alg == '_truncatedBZ':
274 alg = 'BZ'
274 alg = 'BZ'
275 comp = speccompression(alg)
275 comp = speccompression(alg)
276 if not comp:
276 if not comp:
277 raise error.Abort(_('unknown compression algorithm: %s') % alg)
277 raise error.Abort(_('unknown compression algorithm: %s') % alg)
278 return '%s-v1' % comp
278 return '%s-v1' % comp
279 elif isinstance(b, bundle2.unbundle20):
279 elif isinstance(b, bundle2.unbundle20):
280 if 'Compression' in b.params:
280 if 'Compression' in b.params:
281 comp = speccompression(b.params['Compression'])
281 comp = speccompression(b.params['Compression'])
282 if not comp:
282 if not comp:
283 raise error.Abort(_('unknown compression algorithm: %s') % comp)
283 raise error.Abort(_('unknown compression algorithm: %s') % comp)
284 else:
284 else:
285 comp = 'none'
285 comp = 'none'
286
286
287 version = None
287 version = None
288 for part in b.iterparts():
288 for part in b.iterparts():
289 if part.type == 'changegroup':
289 if part.type == 'changegroup':
290 version = part.params['version']
290 version = part.params['version']
291 if version in ('01', '02'):
291 if version in ('01', '02'):
292 version = 'v2'
292 version = 'v2'
293 else:
293 else:
294 raise error.Abort(_('changegroup version %s does not have '
294 raise error.Abort(_('changegroup version %s does not have '
295 'a known bundlespec') % version,
295 'a known bundlespec') % version,
296 hint=_('try upgrading your Mercurial '
296 hint=_('try upgrading your Mercurial '
297 'client'))
297 'client'))
298 elif part.type == 'stream2' and version is None:
298 elif part.type == 'stream2' and version is None:
299 # A stream2 part requires to be part of a v2 bundle
299 # A stream2 part requires to be part of a v2 bundle
300 requirements = urlreq.unquote(part.params['requirements'])
300 requirements = urlreq.unquote(part.params['requirements'])
301 splitted = requirements.split()
301 splitted = requirements.split()
302 params = bundle2._formatrequirementsparams(splitted)
302 params = bundle2._formatrequirementsparams(splitted)
303 return 'none-v2;stream=v2;%s' % params
303 return 'none-v2;stream=v2;%s' % params
304
304
305 if not version:
305 if not version:
306 raise error.Abort(_('could not identify changegroup version in '
306 raise error.Abort(_('could not identify changegroup version in '
307 'bundle'))
307 'bundle'))
308
308
309 return '%s-%s' % (comp, version)
309 return '%s-%s' % (comp, version)
310 elif isinstance(b, streamclone.streamcloneapplier):
310 elif isinstance(b, streamclone.streamcloneapplier):
311 requirements = streamclone.readbundle1header(fh)[2]
311 requirements = streamclone.readbundle1header(fh)[2]
312 formatted = bundle2._formatrequirementsparams(requirements)
312 formatted = bundle2._formatrequirementsparams(requirements)
313 return 'none-packed1;%s' % formatted
313 return 'none-packed1;%s' % formatted
314 else:
314 else:
315 raise error.Abort(_('unknown bundle type: %s') % b)
315 raise error.Abort(_('unknown bundle type: %s') % b)
316
316
317 def _computeoutgoing(repo, heads, common):
317 def _computeoutgoing(repo, heads, common):
318 """Computes which revs are outgoing given a set of common
318 """Computes which revs are outgoing given a set of common
319 and a set of heads.
319 and a set of heads.
320
320
321 This is a separate function so extensions can have access to
321 This is a separate function so extensions can have access to
322 the logic.
322 the logic.
323
323
324 Returns a discovery.outgoing object.
324 Returns a discovery.outgoing object.
325 """
325 """
326 cl = repo.changelog
326 cl = repo.changelog
327 if common:
327 if common:
328 hasnode = cl.hasnode
328 hasnode = cl.hasnode
329 common = [n for n in common if hasnode(n)]
329 common = [n for n in common if hasnode(n)]
330 else:
330 else:
331 common = [nullid]
331 common = [nullid]
332 if not heads:
332 if not heads:
333 heads = cl.heads()
333 heads = cl.heads()
334 return discovery.outgoing(repo, common, heads)
334 return discovery.outgoing(repo, common, heads)
335
335
336 def _checkpublish(pushop):
336 def _checkpublish(pushop):
337 repo = pushop.repo
337 repo = pushop.repo
338 ui = repo.ui
338 ui = repo.ui
339 behavior = ui.config('experimental', 'auto-publish')
339 behavior = ui.config('experimental', 'auto-publish')
340 if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
340 if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
341 return
341 return
342 remotephases = listkeys(pushop.remote, 'phases')
342 remotephases = listkeys(pushop.remote, 'phases')
343 if not remotephases.get('publishing', False):
343 if not remotephases.get('publishing', False):
344 return
344 return
345
345
346 if pushop.revs is None:
346 if pushop.revs is None:
347 published = repo.filtered('served').revs('not public()')
347 published = repo.filtered('served').revs('not public()')
348 else:
348 else:
349 published = repo.revs('::%ln - public()', pushop.revs)
349 published = repo.revs('::%ln - public()', pushop.revs)
350 if published:
350 if published:
351 if behavior == 'warn':
351 if behavior == 'warn':
352 ui.warn(_('%i changesets about to be published\n')
352 ui.warn(_('%i changesets about to be published\n')
353 % len(published))
353 % len(published))
354 elif behavior == 'confirm':
354 elif behavior == 'confirm':
355 if ui.promptchoice(_('push and publish %i changesets (yn)?'
355 if ui.promptchoice(_('push and publish %i changesets (yn)?'
356 '$$ &Yes $$ &No') % len(published)):
356 '$$ &Yes $$ &No') % len(published)):
357 raise error.Abort(_('user quit'))
357 raise error.Abort(_('user quit'))
358 elif behavior == 'abort':
358 elif behavior == 'abort':
359 msg = _('push would publish %i changesets') % len(published)
359 msg = _('push would publish %i changesets') % len(published)
360 hint = _("use --publish or adjust 'experimental.auto-publish'"
360 hint = _("use --publish or adjust 'experimental.auto-publish'"
361 " config")
361 " config")
362 raise error.Abort(msg, hint=hint)
362 raise error.Abort(msg, hint=hint)
363
363
364 def _forcebundle1(op):
364 def _forcebundle1(op):
365 """return true if a pull/push must use bundle1
365 """return true if a pull/push must use bundle1
366
366
367 This function is used to allow testing of the older bundle version"""
367 This function is used to allow testing of the older bundle version"""
368 ui = op.repo.ui
368 ui = op.repo.ui
369 # The goal is this config is to allow developer to choose the bundle
369 # The goal is this config is to allow developer to choose the bundle
370 # version used during exchanged. This is especially handy during test.
370 # version used during exchanged. This is especially handy during test.
371 # Value is a list of bundle version to be picked from, highest version
371 # Value is a list of bundle version to be picked from, highest version
372 # should be used.
372 # should be used.
373 #
373 #
374 # developer config: devel.legacy.exchange
374 # developer config: devel.legacy.exchange
375 exchange = ui.configlist('devel', 'legacy.exchange')
375 exchange = ui.configlist('devel', 'legacy.exchange')
376 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
376 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
377 return forcebundle1 or not op.remote.capable('bundle2')
377 return forcebundle1 or not op.remote.capable('bundle2')
378
378
379 class pushoperation(object):
379 class pushoperation(object):
380 """A object that represent a single push operation
380 """A object that represent a single push operation
381
381
382 Its purpose is to carry push related state and very common operations.
382 Its purpose is to carry push related state and very common operations.
383
383
384 A new pushoperation should be created at the beginning of each push and
384 A new pushoperation should be created at the beginning of each push and
385 discarded afterward.
385 discarded afterward.
386 """
386 """
387
387
388 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
388 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
389 bookmarks=(), publish=False, pushvars=None):
389 bookmarks=(), publish=False, pushvars=None):
390 # repo we push from
390 # repo we push from
391 self.repo = repo
391 self.repo = repo
392 self.ui = repo.ui
392 self.ui = repo.ui
393 # repo we push to
393 # repo we push to
394 self.remote = remote
394 self.remote = remote
395 # force option provided
395 # force option provided
396 self.force = force
396 self.force = force
397 # revs to be pushed (None is "all")
397 # revs to be pushed (None is "all")
398 self.revs = revs
398 self.revs = revs
399 # bookmark explicitly pushed
399 # bookmark explicitly pushed
400 self.bookmarks = bookmarks
400 self.bookmarks = bookmarks
401 # allow push of new branch
401 # allow push of new branch
402 self.newbranch = newbranch
402 self.newbranch = newbranch
403 # step already performed
403 # step already performed
404 # (used to check what steps have been already performed through bundle2)
404 # (used to check what steps have been already performed through bundle2)
405 self.stepsdone = set()
405 self.stepsdone = set()
406 # Integer version of the changegroup push result
406 # Integer version of the changegroup push result
407 # - None means nothing to push
407 # - None means nothing to push
408 # - 0 means HTTP error
408 # - 0 means HTTP error
409 # - 1 means we pushed and remote head count is unchanged *or*
409 # - 1 means we pushed and remote head count is unchanged *or*
410 # we have outgoing changesets but refused to push
410 # we have outgoing changesets but refused to push
411 # - other values as described by addchangegroup()
411 # - other values as described by addchangegroup()
412 self.cgresult = None
412 self.cgresult = None
413 # Boolean value for the bookmark push
413 # Boolean value for the bookmark push
414 self.bkresult = None
414 self.bkresult = None
415 # discover.outgoing object (contains common and outgoing data)
415 # discover.outgoing object (contains common and outgoing data)
416 self.outgoing = None
416 self.outgoing = None
417 # all remote topological heads before the push
417 # all remote topological heads before the push
418 self.remoteheads = None
418 self.remoteheads = None
419 # Details of the remote branch pre and post push
419 # Details of the remote branch pre and post push
420 #
420 #
421 # mapping: {'branch': ([remoteheads],
421 # mapping: {'branch': ([remoteheads],
422 # [newheads],
422 # [newheads],
423 # [unsyncedheads],
423 # [unsyncedheads],
424 # [discardedheads])}
424 # [discardedheads])}
425 # - branch: the branch name
425 # - branch: the branch name
426 # - remoteheads: the list of remote heads known locally
426 # - remoteheads: the list of remote heads known locally
427 # None if the branch is new
427 # None if the branch is new
428 # - newheads: the new remote heads (known locally) with outgoing pushed
428 # - newheads: the new remote heads (known locally) with outgoing pushed
429 # - unsyncedheads: the list of remote heads unknown locally.
429 # - unsyncedheads: the list of remote heads unknown locally.
430 # - discardedheads: the list of remote heads made obsolete by the push
430 # - discardedheads: the list of remote heads made obsolete by the push
431 self.pushbranchmap = None
431 self.pushbranchmap = None
432 # testable as a boolean indicating if any nodes are missing locally.
432 # testable as a boolean indicating if any nodes are missing locally.
433 self.incoming = None
433 self.incoming = None
434 # summary of the remote phase situation
434 # summary of the remote phase situation
435 self.remotephases = None
435 self.remotephases = None
436 # phases changes that must be pushed along side the changesets
436 # phases changes that must be pushed along side the changesets
437 self.outdatedphases = None
437 self.outdatedphases = None
438 # phases changes that must be pushed if changeset push fails
438 # phases changes that must be pushed if changeset push fails
439 self.fallbackoutdatedphases = None
439 self.fallbackoutdatedphases = None
440 # outgoing obsmarkers
440 # outgoing obsmarkers
441 self.outobsmarkers = set()
441 self.outobsmarkers = set()
442 # outgoing bookmarks
442 # outgoing bookmarks
443 self.outbookmarks = []
443 self.outbookmarks = []
444 # transaction manager
444 # transaction manager
445 self.trmanager = None
445 self.trmanager = None
446 # map { pushkey partid -> callback handling failure}
446 # map { pushkey partid -> callback handling failure}
447 # used to handle exception from mandatory pushkey part failure
447 # used to handle exception from mandatory pushkey part failure
448 self.pkfailcb = {}
448 self.pkfailcb = {}
449 # an iterable of pushvars or None
449 # an iterable of pushvars or None
450 self.pushvars = pushvars
450 self.pushvars = pushvars
451 # publish pushed changesets
451 # publish pushed changesets
452 self.publish = publish
452 self.publish = publish
453
453
454 @util.propertycache
454 @util.propertycache
455 def futureheads(self):
455 def futureheads(self):
456 """future remote heads if the changeset push succeeds"""
456 """future remote heads if the changeset push succeeds"""
457 return self.outgoing.missingheads
457 return self.outgoing.missingheads
458
458
459 @util.propertycache
459 @util.propertycache
460 def fallbackheads(self):
460 def fallbackheads(self):
461 """future remote heads if the changeset push fails"""
461 """future remote heads if the changeset push fails"""
462 if self.revs is None:
462 if self.revs is None:
463 # not target to push, all common are relevant
463 # not target to push, all common are relevant
464 return self.outgoing.commonheads
464 return self.outgoing.commonheads
465 unfi = self.repo.unfiltered()
465 unfi = self.repo.unfiltered()
466 # I want cheads = heads(::missingheads and ::commonheads)
466 # I want cheads = heads(::missingheads and ::commonheads)
467 # (missingheads is revs with secret changeset filtered out)
467 # (missingheads is revs with secret changeset filtered out)
468 #
468 #
469 # This can be expressed as:
469 # This can be expressed as:
470 # cheads = ( (missingheads and ::commonheads)
470 # cheads = ( (missingheads and ::commonheads)
471 # + (commonheads and ::missingheads))"
471 # + (commonheads and ::missingheads))"
472 # )
472 # )
473 #
473 #
474 # while trying to push we already computed the following:
474 # while trying to push we already computed the following:
475 # common = (::commonheads)
475 # common = (::commonheads)
476 # missing = ((commonheads::missingheads) - commonheads)
476 # missing = ((commonheads::missingheads) - commonheads)
477 #
477 #
478 # We can pick:
478 # We can pick:
479 # * missingheads part of common (::commonheads)
479 # * missingheads part of common (::commonheads)
480 common = self.outgoing.common
480 common = self.outgoing.common
481 nm = self.repo.changelog.nodemap
481 nm = self.repo.changelog.nodemap
482 cheads = [node for node in self.revs if nm[node] in common]
482 cheads = [node for node in self.revs if nm[node] in common]
483 # and
483 # and
484 # * commonheads parents on missing
484 # * commonheads parents on missing
485 revset = unfi.set('%ln and parents(roots(%ln))',
485 revset = unfi.set('%ln and parents(roots(%ln))',
486 self.outgoing.commonheads,
486 self.outgoing.commonheads,
487 self.outgoing.missing)
487 self.outgoing.missing)
488 cheads.extend(c.node() for c in revset)
488 cheads.extend(c.node() for c in revset)
489 return cheads
489 return cheads
490
490
491 @property
491 @property
492 def commonheads(self):
492 def commonheads(self):
493 """set of all common heads after changeset bundle push"""
493 """set of all common heads after changeset bundle push"""
494 if self.cgresult:
494 if self.cgresult:
495 return self.futureheads
495 return self.futureheads
496 else:
496 else:
497 return self.fallbackheads
497 return self.fallbackheads
498
498
499 # mapping of message used when pushing bookmark
499 # mapping of message used when pushing bookmark
500 bookmsgmap = {'update': (_("updating bookmark %s\n"),
500 bookmsgmap = {'update': (_("updating bookmark %s\n"),
501 _('updating bookmark %s failed!\n')),
501 _('updating bookmark %s failed!\n')),
502 'export': (_("exporting bookmark %s\n"),
502 'export': (_("exporting bookmark %s\n"),
503 _('exporting bookmark %s failed!\n')),
503 _('exporting bookmark %s failed!\n')),
504 'delete': (_("deleting remote bookmark %s\n"),
504 'delete': (_("deleting remote bookmark %s\n"),
505 _('deleting remote bookmark %s failed!\n')),
505 _('deleting remote bookmark %s failed!\n')),
506 }
506 }
507
507
508
508
509 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
509 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
510 publish=False, opargs=None):
510 publish=False, opargs=None):
511 '''Push outgoing changesets (limited by revs) from a local
511 '''Push outgoing changesets (limited by revs) from a local
512 repository to remote. Return an integer:
512 repository to remote. Return an integer:
513 - None means nothing to push
513 - None means nothing to push
514 - 0 means HTTP error
514 - 0 means HTTP error
515 - 1 means we pushed and remote head count is unchanged *or*
515 - 1 means we pushed and remote head count is unchanged *or*
516 we have outgoing changesets but refused to push
516 we have outgoing changesets but refused to push
517 - other values as described by addchangegroup()
517 - other values as described by addchangegroup()
518 '''
518 '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           publish, **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = ('cannot lock source repository: %s\n'
               % stringutil.forcebytestr(err))
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
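
# Caller sketch (informal, simplified from how `hg push` uses this API; the
# exact option handling shown is an assumption of this note):
#
#     other = hg.peer(repo, opts, dest)
#     pushop = exchange.push(repo, other, opts.get('force'), revs=revs,
#                            newbranch=opts.get('new_branch'),
#                            bookmarks=opts.get('bookmark', ()))
#     # the command's exit code is then derived from pushop.cgresult and
#     # pushop.bkresult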

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
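
# Illustrative sketch (not part of upstream exchange.py): an extension could
# register an extra discovery step through the decorator above. The step
# name 'example-step' and the debug message are hypothetical.
#
#     @pushdiscovery('example-step')
#     def _pushdiscoveryexample(pushop):
#         """log how many outgoing changesets discovery found so far"""
#         pushop.ui.debug('example-step: %d outgoing changesets\n'
#                         % len(pushop.outgoing.missing))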

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
624 @pushdiscovery('phase')
624 @pushdiscovery('phase')
625 def _pushdiscoveryphase(pushop):
625 def _pushdiscoveryphase(pushop):
626 """discover the phase that needs to be pushed
626 """discover the phase that needs to be pushed
627
627
628 (computed for both success and failure case for changesets push)"""
628 (computed for both success and failure case for changesets push)"""
629 outgoing = pushop.outgoing
629 outgoing = pushop.outgoing
630 unfi = pushop.repo.unfiltered()
630 unfi = pushop.repo.unfiltered()
631 remotephases = listkeys(pushop.remote, 'phases')
631 remotephases = listkeys(pushop.remote, 'phases')
632
632
633 if (pushop.ui.configbool('ui', '_usedassubrepo')
633 if (pushop.ui.configbool('ui', '_usedassubrepo')
634 and remotephases # server supports phases
634 and remotephases # server supports phases
635 and not pushop.outgoing.missing # no changesets to be pushed
635 and not pushop.outgoing.missing # no changesets to be pushed
636 and remotephases.get('publishing', False)):
636 and remotephases.get('publishing', False)):
637 # When:
637 # When:
638 # - this is a subrepo push
638 # - this is a subrepo push
639 # - and remote support phase
639 # - and remote support phase
640 # - and no changeset are to be pushed
640 # - and no changeset are to be pushed
641 # - and remote is publishing
641 # - and remote is publishing
642 # We may be in issue 3781 case!
642 # We may be in issue 3781 case!
643 # We drop the possible phase synchronisation done by
643 # We drop the possible phase synchronisation done by
644 # courtesy to publish changesets possibly locally draft
644 # courtesy to publish changesets possibly locally draft
645 # on the remote.
645 # on the remote.
646 pushop.outdatedphases = []
646 pushop.outdatedphases = []
647 pushop.fallbackoutdatedphases = []
647 pushop.fallbackoutdatedphases = []
648 return
648 return
649
649
650 pushop.remotephases = phases.remotephasessummary(pushop.repo,
650 pushop.remotephases = phases.remotephasessummary(pushop.repo,
651 pushop.fallbackheads,
651 pushop.fallbackheads,
652 remotephases)
652 remotephases)
653 droots = pushop.remotephases.draftroots
653 droots = pushop.remotephases.draftroots
654
654
655 extracond = ''
655 extracond = ''
656 if not pushop.remotephases.publishing:
656 if not pushop.remotephases.publishing:
657 extracond = ' and public()'
657 extracond = ' and public()'
658 revset = 'heads((%%ln::%%ln) %s)' % extracond
658 revset = 'heads((%%ln::%%ln) %s)' % extracond
659 # Get the list of all revs draft on remote by public here.
659 # Get the list of all revs draft on remote by public here.
660 # XXX Beware that revset break if droots is not strictly
660 # XXX Beware that revset break if droots is not strictly
661 # XXX root we may want to ensure it is but it is costly
661 # XXX root we may want to ensure it is but it is costly
662 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
662 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
663 if not pushop.remotephases.publishing and pushop.publish:
663 if not pushop.remotephases.publishing and pushop.publish:
664 future = list(unfi.set('%ln and (not public() or %ln::)',
664 future = list(unfi.set('%ln and (not public() or %ln::)',
665 pushop.futureheads, droots))
665 pushop.futureheads, droots))
666 elif not outgoing.missing:
666 elif not outgoing.missing:
667 future = fallback
667 future = fallback
668 else:
668 else:
669 # adds changeset we are going to push as draft
669 # adds changeset we are going to push as draft
670 #
670 #
671 # should not be necessary for publishing server, but because of an
671 # should not be necessary for publishing server, but because of an
672 # issue fixed in xxxxx we have to do it anyway.
672 # issue fixed in xxxxx we have to do it anyway.
673 fdroots = list(unfi.set('roots(%ln + %ln::)',
673 fdroots = list(unfi.set('roots(%ln + %ln::)',
674 outgoing.missing, droots))
674 outgoing.missing, droots))
675 fdroots = [f.node() for f in fdroots]
675 fdroots = [f.node() for f in fdroots]
676 future = list(unfi.set(revset, fdroots, pushop.futureheads))
676 future = list(unfi.set(revset, fdroots, pushop.futureheads))
677 pushop.outdatedphases = future
677 pushop.outdatedphases = future
678 pushop.fallbackoutdatedphases = fallback
678 pushop.fallbackoutdatedphases = fallback
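
# Illustrative reading of the revset template above: with a non-publishing
# remote, extracond makes it expand to
#     heads((%ln::%ln) and public())
# i.e. the heads of changesets between droots and pushop.fallbackheads that
# are already public locally (draft on the remote but public here).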

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation that can be quite expensive on a big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = {repo._bookmarks.expandname(bookmark)
                for bookmark in pushop.bookmarks}

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decisions on bookmarks, based on the comparison with the remote
    bookmarks

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
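    # Informal summary of the eight buckets above (see
    # bookmarks.comparebookmarks for the authoritative definitions):
    # addsrc/adddst only exist on one side, advsrc/advdst advanced on one
    # side, diverge moved on both sides, differ point to changesets the
    # other side does not know, invalid are unknown on both sides, and
    # same are identical.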

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty --> no obsolete markers,
        # so we can skip the iteration below
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are pushing and there is at least one obsolete or
            # unstable changeset in missing, then at least one of the
            # missing heads will be obsolete or unstable. So checking
            # heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec
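
# Illustrative sketch (not part of upstream exchange.py): an extension could
# contribute its own bundle2 part through the decorator above. The step name
# 'example-part' and the part type 'x-example' are hypothetical; a generator
# may return a callable that will be invoked with the server reply.
#
#     @b2partsgenerator('example-part')
#     def _pushb2example(pushop, bundler):
#         if 'example' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('example')
#         bundler.newpart('x-example', data=b'payload', mandatory=False)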

def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * with 'force', we do not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)
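
# Informal summary of the two parts generated above: 'check:heads' makes the
# server abort unless its current heads are exactly the ones we observed,
# while 'check:updated-heads' only lists the heads our push affects, so
# unrelated activity on other heads does not cause a push-race abort.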

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise error.Abort(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
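
# Version negotiation above is simply "highest changegroup version supported
# by both sides": e.g. if the server advertises ['01', '02'] and we support
# both, the part is emitted as version '02'; with no advertised versions we
# fall back to '01'.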

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'
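
# Illustrative examples (hypothetical hex ids):
#     _bmaction('', 'c0ffee')       -> 'export'
#     _bmaction('c0ffee', '')       -> 'delete'
#     _bmaction('c0ffee', 'dec0de') -> 'update'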

def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)
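
# Client-side usage sketch: pushvars come from the experimental `--pushvars`
# push option, e.g. `hg push --pushvars "DEBUG=1"`; server-side hooks then
# see them as HG_USERVAR_* environment variables (the exact hook-side
# variable naming is stated here as an assumption, not enforced by this
# function).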

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
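
# Note on the early return above: the 'replycaps' part is always added
# first, so `bundler.nbparts <= 1` means no generator contributed anything
# and there is nothing worth sending.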

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation usually done as a courtesy to
        # publish on the remote changesets that are possibly draft locally.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server, or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None, depth=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

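# Because transactionmanager derives from util.transactional, callers can use
# it as a context manager, which is exactly what pull() does below. A minimal
# usage sketch (the URL is hypothetical):
#
#     trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
#     with trmanager:
#         tr = trmanager.transaction()  # created lazily, on first request
#         ...  # apply pulled data under the transaction
#     # a clean exit close()s the transaction; an exception release()s it
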
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

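# Usage sketch: fetch a pushkey namespace from the remote; the namespaces used
# in this module are 'bookmarks', 'phases' and 'obsolete':
#
#     books = listkeys(remote, 'bookmarks')  # e.g. {'feat': '<hex node>'}
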
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as a band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None,
         depth=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           depth=depth,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable('command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to
            # run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop

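# A minimal usage sketch for pull() (the path and URL are hypothetical):
#
#     from mercurial import hg
#     repo = hg.repository(ui, '/path/to/local/repo')
#     remote = hg.peer(ui, {}, 'https://example.com/repo')
#     pullop = pull(repo, remote)  # heads=None pulls everything
#     if pullop.cgresult == 0:
#         ui.status('no changesets were added\n')
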
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, modify the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

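# Registration sketch for the decorator above; the step name and function are
# made up (real steps are registered right below):
#
#     @pulldiscovery('example:noop')
#     def _pulldiscoverynoop(pullop):
#         pullop.repo.ui.debug('nothing to discover\n')
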
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    # check that the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs['includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs['excludepats'] = pullop.excludepats

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phase changes
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark updates
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break future useful rollback calls
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

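# A hedged sketch of the hgrc configuration read above (keys are
# '<user>.includes'/'<user>.excludes' with 'default.*' as fallback; '*' is
# expanded to 'path:.', i.e. the whole repository; user names are made up):
#
#     [narrowhgacl]
#     default.includes = dir/src
#     alice.includes = *
#     bob.excludes = dir/secret
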
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

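# Registration sketch for the decorator above; the part name and function are
# made up (a real generator, 'stream2', is registered below):
#
#     @getbundle2partsgenerator('example:noop')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         pass  # a real generator would add parts via bundler.newpart(...)
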
2118 def bundle2requested(bundlecaps):
2118 def bundle2requested(bundlecaps):
2119 if bundlecaps is not None:
2119 if bundlecaps is not None:
2120 return any(cap.startswith('HG2') for cap in bundlecaps)
2120 return any(cap.startswith('HG2') for cap in bundlecaps)
2121 return False
2121 return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
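
# Typical call shape (a sketch; argument values depend on the request being
# served):
#
#   info, chunks = getbundlechunks(repo, 'serve', heads=heads, common=common,
#                                  bundlecaps=bundlecaps, cg=True)
#   for chunk in chunks:
#       pass  # stream each raw chunk to the client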

@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise error.Abort(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps, matcher=matcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if (kwargs.get(r'narrow', False) and kwargs.get(r'narrow_acl', False)
        and (include or exclude)):
        narrowspecpart = bundler.newpart('narrow:spec')
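        # Payload sketch (the patterns are hypothetical): with
        # include=['path:foo'] and exclude=['path:bar'] the part data is
        # 'path:foo\0path:bar', i.e. newline-joined includes, a NUL
        # separator, then newline-joined excludes.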
        data = ''
        if include:
            data += '\n'.join(include)
        data += '\0'
        if exclude:
            data += '\n'.join(exclude)

        narrowspecpart.data = data

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise error.Abort(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise error.Abort(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if the client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform the data into the format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
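
# For example, a client that wants race detection without shipping every head
# can send ['hashed', hashlib.sha1(''.join(sorted(repo.heads()))).digest()];
# check_heads() recomputes the same digest locally and compares.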

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and the other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
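
# For illustration (the URLs are hypothetical), a manifest such as:
#
#   https://example.com/full.zstd.hg BUNDLESPEC=zstd-v2
#   https://example.com/full.gzip.hg BUNDLESPEC=gzip-v1 REQUIRESNI=true
#
# parses to entries like {'URL': ..., 'BUNDLESPEC': 'zstd-v2',
# 'COMPRESSION': 'zstd', 'VERSION': 'v2'}, one dict per manifest line.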

def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and
        bundlespec.wireversion == '02' and
        bundlespec.contentopts.get('streamv2')):
        return True

    return False
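
# For instance (assuming the standard spec names), the spec 'none-packed1'
# parses to wirecompression 'UN' / wireversion 's1' and matches the stream
# clone v1 branch, while 'none-v2;stream=v2' matches the v2 branch.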

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is, so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to the next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to the next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
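
# For example, with the following client configuration:
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# entries advertising COMPRESSION=zstd sort before gzip ones, and entries
# matching neither preference keep their manifest order.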

def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

        return False