##// END OF EJS Templates
phases: rework the logic of _pushdiscoveryphase to bound complexity...
marmoute -
r52479:1cef1412 default
parent child Browse files
Show More
@@ -1,2941 +1,2944 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import weakref
10 import weakref
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullrev,
15 nullrev,
16 )
16 )
17 from . import (
17 from . import (
18 bookmarks as bookmod,
18 bookmarks as bookmod,
19 bundle2,
19 bundle2,
20 bundlecaches,
20 bundlecaches,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 logexchange,
25 logexchange,
26 narrowspec,
26 narrowspec,
27 obsolete,
27 obsolete,
28 obsutil,
28 obsutil,
29 phases,
29 phases,
30 pushkey,
30 pushkey,
31 pycompat,
31 pycompat,
32 requirements,
32 requirements,
33 scmutil,
33 scmutil,
34 streamclone,
34 streamclone,
35 url as urlmod,
35 url as urlmod,
36 util,
36 util,
37 wireprototypes,
37 wireprototypes,
38 )
38 )
39 from .utils import (
39 from .utils import (
40 hashutil,
40 hashutil,
41 stringutil,
41 stringutil,
42 urlutil,
42 urlutil,
43 )
43 )
44 from .interfaces import repository
44 from .interfaces import repository
45
45
46 urlerr = util.urlerr
46 urlerr = util.urlerr
47 urlreq = util.urlreq
47 urlreq = util.urlreq
48
48
49 _NARROWACL_SECTION = b'narrowacl'
49 _NARROWACL_SECTION = b'narrowacl'
50
50
51
51
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler suited to the bundle read from ``fh``.

    The first four bytes of the stream select the format: ``HG10`` (cg1),
    ``HG2x`` (bundle2) or ``HGS1`` (legacy stream clone). A stream that
    starts with a NUL byte is treated as a headerless, uncompressed cg1
    bundle. ``fname`` is only used for error reporting (``b"stream"`` when
    empty) and is joined with ``vfs`` when one is provided.
    """
    header = changegroup.readexactly(fh, 4)

    compression = None
    if not fname:
        fname = b"stream"
    if not header.startswith(b'HG') and header.startswith(b'\0'):
        # headerless bundle: re-inject the peeked bytes and assume
        # uncompressed cg1
        fh = changegroup.headerlessfixup(fh, header)
        header = b"HG10"
        compression = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if compression is None:
            # cg1 carries its compression marker right after the magic
            compression = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compression)
    if version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == b'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_(b'%s: unknown bundle version %s') % (fname, version))
81
81
82
82
def _format_params(params):
    """Render ``params`` as a ``key=value;key=value`` bundlespec suffix.

    Keys are emitted in sorted order; values are URL-quoted so the result
    can safely be appended after a ``;`` in a bundlespec string.
    """
    quoted = [
        b"%s=%s" % (key, urlreq.quote(value))
        for key, value in sorted(params.items())
    ]
    return b';'.join(quoted)
89
89
90
90
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec byte string such as ``b'gzip-v2'`` or
    ``b'none-v2;stream=v2;...'``. Raises ``error.Abort`` when the bundle
    type, compression or changegroup version cannot be mapped to a known
    bundlespec.
    """

    def speccompression(alg):
        # map an internal compression engine name to its bundlespec name,
        # or None when the engine is unknown
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    params = {}

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            # internal marker for a BZ stream whose header was consumed
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            compalg = b.params[b'Compression']
            comp = speccompression(compalg)
            if not comp:
                # report the offending algorithm name, not the failed
                # lookup result (which is None here)
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % compalg
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                cgversion = part.params[b'version']
                if cgversion in (b'01', b'02'):
                    version = b'v2'
                elif cgversion in (b'03',):
                    version = b'v2'
                    params[b'cg.version'] = cgversion
                else:
                    # use the unknown changegroup version in the message;
                    # ``version`` is still None at this point
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % cgversion,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params
            elif part.type == b'stream3-exp' and version is None:
                # A stream3 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v3-exp;%s' % params
            elif part.type == b'obsmarkers':
                params[b'obsolescence'] = b'yes'
                if not part.mandatory:
                    params[b'obsolescence-mandatory'] = b'no'

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )
        spec = b'%s-%s' % (comp, version)
        if params:
            spec += b';'
            spec += _format_params(params)
        return spec

    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
176
176
177
177
def _computeoutgoing(repo, heads, common):
    """Build a discovery.outgoing object from ``common`` and ``heads``.

    Kept as a standalone helper so extensions can access (or wrap) the
    logic. Nodes missing from the local changelog are dropped from
    ``common``; an empty ``common`` falls back to the null node and an
    empty ``heads`` falls back to all changelog heads.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        known_common = [repo.nullid]
    else:
        known_common = [node for node in common if cl.hasnode(node)]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, known_common, heads)
196
196
197
197
def _checkpublish(pushop):
    """Warn, confirm or abort when a push would publish draft changesets.

    Behavior is driven by the ``experimental.auto-publish`` config
    (``warn``/``confirm``/``abort``); anything else — or an explicit
    ``--publish`` push — disables the check. Only pushes to a publishing
    remote are concerned.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        # non-publishing remote: this push publishes nothing
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
        # we want to use pushop.revs in the revset even if they themselves are
        # secret, but we don't want to have anything that the server won't see
        # in the result of this expression
        published &= repo.filtered(b'served')
    if not published:
        return
    if behavior == b'warn':
        ui.warn(_(b'%i changesets about to be published\n') % len(published))
    elif behavior == b'confirm':
        prompt = _(
            b'push and publish %i changesets (yn)?$$ &Yes $$ &No'
        ) % len(published)
        if ui.promptchoice(prompt):
            raise error.CanceledError(_(b'user quit'))
    elif behavior == b'abort':
        msg = _(b'push would publish %i changesets') % len(published)
        hint = _(
            b"use --publish or adjust 'experimental.auto-publish'"
            b" config"
        )
        raise error.Abort(msg, hint=hint)
234
234
235
235
def _forcebundle1(op):
    """Return True when this pull/push operation must use bundle1.

    The ``devel.legacy.exchange`` developer config lets tests pin the
    bundle version used during exchange: it lists acceptable versions and
    the highest listed one is picked. Bundle1 is forced either when the
    config requests it (``bundle1`` listed without ``bundle2``) or when
    the remote lacks the ``bundle2`` capability.
    """
    # developer config: devel.legacy.exchange
    exchange = op.repo.ui.configlist(b'devel', b'legacy.exchange')
    wants_bundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
    return wants_bundle1 or not op.remote.capable(b'bundle2')
250
250
251
251
class pushoperation:
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.ancestorsof

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::push_heads and ::commonheads)
        #
        # To push, we already computed
        # common = (::commonheads)
        # missing = ((commonheads::push_heads) - commonheads)
        #
        # So we basically search
        #
        # almost_heads = heads((parents(missing) + push_heads) & common)
        #
        # We use "almost" here as this can return revision that are ancestors
        # of other in the set and we need to explicitly turn it into an
        # antichain later. We can do so using:
        #
        # cheads = heads(almost_heads::almost_heads)
        #
        # In pratice the code is a bit more convulted to avoid some extra
        # computation. It aims at doing the same computation as highlighted
        # above however.
        common = self.outgoing.common
        cl = unfi.changelog
        to_rev = cl.index.rev
        to_node = cl.node
        parent_revs = cl.parentrevs
        unselected = []
        cheads = set()
        # XXX-perf: `self.revs` and `outgoing.missing` could hold revs directly
        for n in self.revs:
            r = to_rev(n)
            if r in common:
                cheads.add(r)
            else:
                unselected.append(r)
        known_non_heads = cl.ancestors(cheads, inclusive=True)
        if unselected:
            missing_revs = {to_rev(n) for n in self.outgoing.missing}
            missing_revs.add(nullrev)
            root_points = set()
            for r in missing_revs:
                p1, p2 = parent_revs(r)
                if p1 not in missing_revs and p1 not in known_non_heads:
                    root_points.add(p1)
                if p2 not in missing_revs and p2 not in known_non_heads:
                    root_points.add(p2)
            if root_points:
                # NOTE: the revset expression must be a bytes literal —
                # Mercurial revset parsing operates on bytes throughout,
                # matching every other revset in this module.
                heads = unfi.revs(
                    b'heads(%ld::%ld)', root_points, root_points
                )
                cheads.update(heads)
        # XXX-perf: could this be a set of revision?
        return [to_node(r) for r in sorted(cheads)]

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
406
406
407
407
# Messages used when pushing a bookmark, keyed by action.
# Each value is a (success message, failure message) pair, both taking the
# bookmark name as their single %s argument.
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed\n'),
    ),
}
423
423
424
424
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    """Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    """
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    # a local destination must support every requirement of the source
    if pushop.remote.local():
        unsupported = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if unsupported:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(unsupported)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )
    for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        registered = any(
            computers.get(category)
            for computers in repo._sidedata_computers.values()
        )
        if not registered:
            raise error.Abort(
                _(
                    b'cannot push: required sidedata category not supported'
                    b" by this client: '%s'"
                )
                % pycompat.bytestr(category)
            )
    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not pushop.force:
                    _checksubrepostate(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
535
535
536
536
# Ordered list of discovery step names to run before a push.
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
544
544
545
545
546 def pushdiscovery(stepname):
546 def pushdiscovery(stepname):
547 """decorator for function performing discovery before push
547 """decorator for function performing discovery before push
548
548
549 The function is added to the step -> function mapping and appended to the
549 The function is added to the step -> function mapping and appended to the
550 list of steps. Beware that decorated function will be added in order (this
550 list of steps. Beware that decorated function will be added in order (this
551 may matter).
551 may matter).
552
552
553 You can only use this decorator for a new step, if you want to wrap a step
553 You can only use this decorator for a new step, if you want to wrap a step
554 from an extension, change the pushdiscovery dictionary directly."""
554 from an extension, change the pushdiscovery dictionary directly."""
555
555
556 def dec(func):
556 def dec(func):
557 assert stepname not in pushdiscoverymapping
557 assert stepname not in pushdiscoverymapping
558 pushdiscoverymapping[stepname] = func
558 pushdiscoverymapping[stepname] = func
559 pushdiscoveryorder.append(stepname)
559 pushdiscoveryorder.append(stepname)
560 return func
560 return func
561
561
562 return dec
562 return dec
563
563
564
564
def _pushdiscovery(pushop):
    """Run all discovery steps

    Steps run in registration order (``pushdiscoveryorder``), each receiving
    the shared ``pushop`` object to populate with its findings.
    """
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)
570
570
571
571
def _checksubrepostate(pushop):
    """Ensure all outgoing referenced subrepo revisions are present locally"""

    repo = pushop.repo

    # If the repository does not use subrepos, skip the expensive
    # manifest checks.
    if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
        return

    for n in pushop.outgoing.missing:
        ctx = repo[n]

        # only changesets that touch .hgsubstate need their subrepos verified
        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                sub.verify(onpush=True)
589
589
590
590
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed

    Fills ``pushop.outgoing``, ``pushop.remoteheads`` and ``pushop.incoming``.
    """
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
616
616
617
617
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    repo = pushop.repo
    unfi = repo.unfiltered()
    cl = unfi.changelog
    to_rev = cl.index.rev
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    fallbackheads_rev = {to_rev(n) for n in pushop.fallbackheads}
    pushop.remotephases = phases.RemotePhasesSummary(
        pushop.repo,
        fallbackheads_rev,
        remotephases,
    )
    droots = set(pushop.remotephases.draft_roots)

    # "fallback" phases: what we would advertise if the changeset push fails
    fallback_publishing = pushop.remotephases.publishing
    # "push" phases: what we will advertise if the changeset push succeeds
    push_publishing = pushop.remotephases.publishing or pushop.publish
    missing_revs = {to_rev(n) for n in outgoing.missing}
    drafts = unfi._phasecache.get_raw_set(unfi, phases.draft)

    if fallback_publishing:
        fallback_roots = droots - missing_revs
        revset = b'heads(%ld::%ld)'
    else:
        fallback_roots = droots - drafts
        fallback_roots -= missing_revs
        # Get the list of all revs draft on remote but public here.
        revset = b'heads((%ld::%ld) and public())'
    if not fallback_roots:
        fallback = fallback_rev = []
    else:
        fallback_rev = unfi.revs(revset, fallback_roots, fallbackheads_rev)
        fallback = [repo[r] for r in fallback_rev]

    if push_publishing:
        # all pushed revisions become public on a publishing exchange
        published = missing_revs.copy()
    else:
        published = missing_revs - drafts
        if pushop.publish:
            published.update(fallbackheads_rev & drafts)
        elif fallback:
            published.update(fallback_rev)

    pushop.outdatedphases = [repo[r] for r in cl.headrevs(published)]
    pushop.fallbackoutdatedphases = fallback
684
687
685
688
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """select the obsolescence markers relevant to the push

    Skips early when exchange is disabled, when the local obsstore is empty,
    or when the remote does not advertise the ``obsolete`` namespace.
    """
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
702
705
703
706
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and schedule bookmark pushes

    Delegates the actual decisions to ``_processcompared``.
    """
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
723
726
724
727
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decision on bookmarks to push to the remote repo

    Exists to help extensions alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    # locally advanced bookmarks: push when the new target is being pushed
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        if bookmod.isdivergent(b):
            pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
            pushop.bkresult = 2
        else:
            pushop.outbookmarks.append((b, b'', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
777
780
778
781
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push; aborts when a non-forced
    push would publish obsolete or unstable changesets.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.ancestorsof:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
815
818
816
819
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""

    def dec(func):
        # each step name registers exactly once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            # idx lets a step be inserted at a specific point in the order
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec
846
849
847
850
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.ancestorsof:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # old-style check: the full set of remote heads must be unchanged
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            # only check the heads our push actually affects
            affected = set()
            for branch, heads in pushop.pushbranchmap.items():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)
873
876
874
877
875 def _pushing(pushop):
878 def _pushing(pushop):
876 """return True if we are pushing anything"""
879 """return True if we are pushing anything"""
877 return bool(
880 return bool(
878 pushop.outgoing.missing
881 pushop.outgoing.missing
879 or pushop.outdatedphases
882 or pushop.outdatedphases
880 or pushop.outobsmarkers
883 or pushop.outobsmarkers
881 or pushop.outbookmarks
884 or pushop.outbookmarks
882 )
885 )
883
886
884
887
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    # record (name, expected-old-value) pairs so the server can detect races
    data = []
    for book, old, new in pushop.outbookmarks:
        data.append((book, old))
    checkdata = bookmod.binaryencode(pushop.repo, data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
899
902
900
903
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = {p: [] for p in phases.allphases}
        to_node = pushop.repo.unfiltered().changelog.node
        checks[phases.public].extend(
            to_node(r) for r in pushop.remotephases.public_heads
        )
        checks[phases.draft].extend(
            to_node(r) for r in pushop.remotephases.draft_roots
        )
        if any(checks.values()):
            for phase in checks:
                checks[phase].sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)
923
926
924
927
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # negotiate the highest changegroup version both sides support
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
    cgstream = changegroup.makestream(
        pushop.repo,
        pushop.outgoing,
        version,
        b'push',
        bundlecaps=b2caps,
        remote_sidedata=remote_sidedata,
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if scmutil.istreemanifest(pushop.repo):
        cgpart.addparam(b'treemanifest', b'1')
    if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
978
981
979
982
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Prefers the binary ``phase-heads`` part; falls back to pushkey when the
    remote lacks it or legacy exchange is configured.
    """
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
996
999
997
1000
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if pushop.outdatedphases:
        updates = {p: [] for p in phases.allphases}
        # phase 0 (public): heads listed here become public on the remote
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart(b'phase-heads', data=phasedata)
1006
1009
1007
1010
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    part2node = []

    def handlefailure(pushop, exc):
        # map the failing part id back to the node we tried to publish
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        # one pushkey part per head moved from draft to public
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1043
1046
1044
1047
1045 @b2partsgenerator(b'obsmarkers')
1048 @b2partsgenerator(b'obsmarkers')
1046 def _pushb2obsmarkers(pushop, bundler):
1049 def _pushb2obsmarkers(pushop, bundler):
1047 if b'obsmarkers' in pushop.stepsdone:
1050 if b'obsmarkers' in pushop.stepsdone:
1048 return
1051 return
1049 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1052 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1050 if obsolete.commonversion(remoteversions) is None:
1053 if obsolete.commonversion(remoteversions) is None:
1051 return
1054 return
1052 pushop.stepsdone.add(b'obsmarkers')
1055 pushop.stepsdone.add(b'obsmarkers')
1053 if pushop.outobsmarkers:
1056 if pushop.outobsmarkers:
1054 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1057 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1055 bundle2.buildobsmarkerspart(bundler, markers)
1058 bundle2.buildobsmarkerspart(bundler, markers)
1056
1059
1057
1060
1058 @b2partsgenerator(b'bookmarks')
1061 @b2partsgenerator(b'bookmarks')
1059 def _pushb2bookmarks(pushop, bundler):
1062 def _pushb2bookmarks(pushop, bundler):
1060 """handle bookmark push through bundle2"""
1063 """handle bookmark push through bundle2"""
1061 if b'bookmarks' in pushop.stepsdone:
1064 if b'bookmarks' in pushop.stepsdone:
1062 return
1065 return
1063 b2caps = bundle2.bundle2caps(pushop.remote)
1066 b2caps = bundle2.bundle2caps(pushop.remote)
1064
1067
1065 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1068 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1066 legacybooks = b'bookmarks' in legacy
1069 legacybooks = b'bookmarks' in legacy
1067
1070
1068 if not legacybooks and b'bookmarks' in b2caps:
1071 if not legacybooks and b'bookmarks' in b2caps:
1069 return _pushb2bookmarkspart(pushop, bundler)
1072 return _pushb2bookmarkspart(pushop, bundler)
1070 elif b'pushkey' in b2caps:
1073 elif b'pushkey' in b2caps:
1071 return _pushb2bookmarkspushkey(pushop, bundler)
1074 return _pushb2bookmarkspushkey(pushop, bundler)
1072
1075
1073
1076
1074 def _bmaction(old, new):
1077 def _bmaction(old, new):
1075 """small utility for bookmark pushing"""
1078 """small utility for bookmark pushing"""
1076 if not old:
1079 if not old:
1077 return b'export'
1080 return b'export'
1078 elif not new:
1081 elif not new:
1079 return b'delete'
1082 return b'delete'
1080 return b'update'
1083 return b'update'
1081
1084
1082
1085
1083 def _abortonsecretctx(pushop, node, b):
1086 def _abortonsecretctx(pushop, node, b):
1084 """abort if a given bookmark points to a secret changeset"""
1087 """abort if a given bookmark points to a secret changeset"""
1085 if node and pushop.repo[node].phase() == phases.secret:
1088 if node and pushop.repo[node].phase() == phases.secret:
1086 raise error.Abort(
1089 raise error.Abort(
1087 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1090 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1088 )
1091 )
1089
1092
1090
1093
1091 def _pushb2bookmarkspart(pushop, bundler):
1094 def _pushb2bookmarkspart(pushop, bundler):
1092 pushop.stepsdone.add(b'bookmarks')
1095 pushop.stepsdone.add(b'bookmarks')
1093 if not pushop.outbookmarks:
1096 if not pushop.outbookmarks:
1094 return
1097 return
1095
1098
1096 allactions = []
1099 allactions = []
1097 data = []
1100 data = []
1098 for book, old, new in pushop.outbookmarks:
1101 for book, old, new in pushop.outbookmarks:
1099 _abortonsecretctx(pushop, new, book)
1102 _abortonsecretctx(pushop, new, book)
1100 data.append((book, new))
1103 data.append((book, new))
1101 allactions.append((book, _bmaction(old, new)))
1104 allactions.append((book, _bmaction(old, new)))
1102 checkdata = bookmod.binaryencode(pushop.repo, data)
1105 checkdata = bookmod.binaryencode(pushop.repo, data)
1103 bundler.newpart(b'bookmarks', data=checkdata)
1106 bundler.newpart(b'bookmarks', data=checkdata)
1104
1107
1105 def handlereply(op):
1108 def handlereply(op):
1106 ui = pushop.ui
1109 ui = pushop.ui
1107 # if success
1110 # if success
1108 for book, action in allactions:
1111 for book, action in allactions:
1109 ui.status(bookmsgmap[action][0] % book)
1112 ui.status(bookmsgmap[action][0] % book)
1110
1113
1111 return handlereply
1114 return handlereply
1112
1115
1113
1116
1114 def _pushb2bookmarkspushkey(pushop, bundler):
1117 def _pushb2bookmarkspushkey(pushop, bundler):
1115 pushop.stepsdone.add(b'bookmarks')
1118 pushop.stepsdone.add(b'bookmarks')
1116 part2book = []
1119 part2book = []
1117 enc = pushkey.encode
1120 enc = pushkey.encode
1118
1121
1119 def handlefailure(pushop, exc):
1122 def handlefailure(pushop, exc):
1120 targetid = int(exc.partid)
1123 targetid = int(exc.partid)
1121 for partid, book, action in part2book:
1124 for partid, book, action in part2book:
1122 if partid == targetid:
1125 if partid == targetid:
1123 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1126 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1124 # we should not be called for part we did not generated
1127 # we should not be called for part we did not generated
1125 assert False
1128 assert False
1126
1129
1127 for book, old, new in pushop.outbookmarks:
1130 for book, old, new in pushop.outbookmarks:
1128 _abortonsecretctx(pushop, new, book)
1131 _abortonsecretctx(pushop, new, book)
1129 part = bundler.newpart(b'pushkey')
1132 part = bundler.newpart(b'pushkey')
1130 part.addparam(b'namespace', enc(b'bookmarks'))
1133 part.addparam(b'namespace', enc(b'bookmarks'))
1131 part.addparam(b'key', enc(book))
1134 part.addparam(b'key', enc(book))
1132 part.addparam(b'old', enc(hex(old)))
1135 part.addparam(b'old', enc(hex(old)))
1133 part.addparam(b'new', enc(hex(new)))
1136 part.addparam(b'new', enc(hex(new)))
1134 action = b'update'
1137 action = b'update'
1135 if not old:
1138 if not old:
1136 action = b'export'
1139 action = b'export'
1137 elif not new:
1140 elif not new:
1138 action = b'delete'
1141 action = b'delete'
1139 part2book.append((part.id, book, action))
1142 part2book.append((part.id, book, action))
1140 pushop.pkfailcb[part.id] = handlefailure
1143 pushop.pkfailcb[part.id] = handlefailure
1141
1144
1142 def handlereply(op):
1145 def handlereply(op):
1143 ui = pushop.ui
1146 ui = pushop.ui
1144 for partid, book, action in part2book:
1147 for partid, book, action in part2book:
1145 partrep = op.records.getreplies(partid)
1148 partrep = op.records.getreplies(partid)
1146 results = partrep[b'pushkey']
1149 results = partrep[b'pushkey']
1147 assert len(results) <= 1
1150 assert len(results) <= 1
1148 if not results:
1151 if not results:
1149 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1152 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1150 else:
1153 else:
1151 ret = int(results[0][b'return'])
1154 ret = int(results[0][b'return'])
1152 if ret:
1155 if ret:
1153 ui.status(bookmsgmap[action][0] % book)
1156 ui.status(bookmsgmap[action][0] % book)
1154 else:
1157 else:
1155 ui.warn(bookmsgmap[action][1] % book)
1158 ui.warn(bookmsgmap[action][1] % book)
1156 if pushop.bkresult is not None:
1159 if pushop.bkresult is not None:
1157 pushop.bkresult = 1
1160 pushop.bkresult = 1
1158
1161
1159 return handlereply
1162 return handlereply
1160
1163
1161
1164
1162 @b2partsgenerator(b'pushvars', idx=0)
1165 @b2partsgenerator(b'pushvars', idx=0)
1163 def _getbundlesendvars(pushop, bundler):
1166 def _getbundlesendvars(pushop, bundler):
1164 '''send shellvars via bundle2'''
1167 '''send shellvars via bundle2'''
1165 pushvars = pushop.pushvars
1168 pushvars = pushop.pushvars
1166 if pushvars:
1169 if pushvars:
1167 shellvars = {}
1170 shellvars = {}
1168 for raw in pushvars:
1171 for raw in pushvars:
1169 if b'=' not in raw:
1172 if b'=' not in raw:
1170 msg = (
1173 msg = (
1171 b"unable to parse variable '%s', should follow "
1174 b"unable to parse variable '%s', should follow "
1172 b"'KEY=VALUE' or 'KEY=' format"
1175 b"'KEY=VALUE' or 'KEY=' format"
1173 )
1176 )
1174 raise error.Abort(msg % raw)
1177 raise error.Abort(msg % raw)
1175 k, v = raw.split(b'=', 1)
1178 k, v = raw.split(b'=', 1)
1176 shellvars[k] = v
1179 shellvars[k] = v
1177
1180
1178 part = bundler.newpart(b'pushvars')
1181 part = bundler.newpart(b'pushvars')
1179
1182
1180 for key, value in shellvars.items():
1183 for key, value in shellvars.items():
1181 part.addparam(key, value, mandatory=False)
1184 part.addparam(key, value, mandatory=False)
1182
1185
1183
1186
1184 def _pushbundle2(pushop):
1187 def _pushbundle2(pushop):
1185 """push data to the remote using bundle2
1188 """push data to the remote using bundle2
1186
1189
1187 The only currently supported type of data is changegroup but this will
1190 The only currently supported type of data is changegroup but this will
1188 evolve in the future."""
1191 evolve in the future."""
1189 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1192 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1190 pushback = pushop.trmanager and pushop.ui.configbool(
1193 pushback = pushop.trmanager and pushop.ui.configbool(
1191 b'experimental', b'bundle2.pushback'
1194 b'experimental', b'bundle2.pushback'
1192 )
1195 )
1193
1196
1194 # create reply capability
1197 # create reply capability
1195 capsblob = bundle2.encodecaps(
1198 capsblob = bundle2.encodecaps(
1196 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1199 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1197 )
1200 )
1198 bundler.newpart(b'replycaps', data=capsblob)
1201 bundler.newpart(b'replycaps', data=capsblob)
1199 replyhandlers = []
1202 replyhandlers = []
1200 for partgenname in b2partsgenorder:
1203 for partgenname in b2partsgenorder:
1201 partgen = b2partsgenmapping[partgenname]
1204 partgen = b2partsgenmapping[partgenname]
1202 ret = partgen(pushop, bundler)
1205 ret = partgen(pushop, bundler)
1203 if callable(ret):
1206 if callable(ret):
1204 replyhandlers.append(ret)
1207 replyhandlers.append(ret)
1205 # do not push if nothing to push
1208 # do not push if nothing to push
1206 if bundler.nbparts <= 1:
1209 if bundler.nbparts <= 1:
1207 return
1210 return
1208 stream = util.chunkbuffer(bundler.getchunks())
1211 stream = util.chunkbuffer(bundler.getchunks())
1209 try:
1212 try:
1210 try:
1213 try:
1211 with pushop.remote.commandexecutor() as e:
1214 with pushop.remote.commandexecutor() as e:
1212 reply = e.callcommand(
1215 reply = e.callcommand(
1213 b'unbundle',
1216 b'unbundle',
1214 {
1217 {
1215 b'bundle': stream,
1218 b'bundle': stream,
1216 b'heads': [b'force'],
1219 b'heads': [b'force'],
1217 b'url': pushop.remote.url(),
1220 b'url': pushop.remote.url(),
1218 },
1221 },
1219 ).result()
1222 ).result()
1220 except error.BundleValueError as exc:
1223 except error.BundleValueError as exc:
1221 raise error.RemoteError(_(b'missing support for %s') % exc)
1224 raise error.RemoteError(_(b'missing support for %s') % exc)
1222 try:
1225 try:
1223 trgetter = None
1226 trgetter = None
1224 if pushback:
1227 if pushback:
1225 trgetter = pushop.trmanager.transaction
1228 trgetter = pushop.trmanager.transaction
1226 op = bundle2.processbundle(
1229 op = bundle2.processbundle(
1227 pushop.repo,
1230 pushop.repo,
1228 reply,
1231 reply,
1229 trgetter,
1232 trgetter,
1230 remote=pushop.remote,
1233 remote=pushop.remote,
1231 )
1234 )
1232 except error.BundleValueError as exc:
1235 except error.BundleValueError as exc:
1233 raise error.RemoteError(_(b'missing support for %s') % exc)
1236 raise error.RemoteError(_(b'missing support for %s') % exc)
1234 except bundle2.AbortFromPart as exc:
1237 except bundle2.AbortFromPart as exc:
1235 pushop.ui.error(_(b'remote: %s\n') % exc)
1238 pushop.ui.error(_(b'remote: %s\n') % exc)
1236 if exc.hint is not None:
1239 if exc.hint is not None:
1237 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1240 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1238 raise error.RemoteError(_(b'push failed on remote'))
1241 raise error.RemoteError(_(b'push failed on remote'))
1239 except error.PushkeyFailed as exc:
1242 except error.PushkeyFailed as exc:
1240 partid = int(exc.partid)
1243 partid = int(exc.partid)
1241 if partid not in pushop.pkfailcb:
1244 if partid not in pushop.pkfailcb:
1242 raise
1245 raise
1243 pushop.pkfailcb[partid](pushop, exc)
1246 pushop.pkfailcb[partid](pushop, exc)
1244 for rephand in replyhandlers:
1247 for rephand in replyhandlers:
1245 rephand(op)
1248 rephand(op)
1246
1249
1247
1250
1248 def _pushchangeset(pushop):
1251 def _pushchangeset(pushop):
1249 """Make the actual push of changeset bundle to remote repo"""
1252 """Make the actual push of changeset bundle to remote repo"""
1250 if b'changesets' in pushop.stepsdone:
1253 if b'changesets' in pushop.stepsdone:
1251 return
1254 return
1252 pushop.stepsdone.add(b'changesets')
1255 pushop.stepsdone.add(b'changesets')
1253 if not _pushcheckoutgoing(pushop):
1256 if not _pushcheckoutgoing(pushop):
1254 return
1257 return
1255
1258
1256 # Should have verified this in push().
1259 # Should have verified this in push().
1257 assert pushop.remote.capable(b'unbundle')
1260 assert pushop.remote.capable(b'unbundle')
1258
1261
1259 pushop.repo.prepushoutgoinghooks(pushop)
1262 pushop.repo.prepushoutgoinghooks(pushop)
1260 outgoing = pushop.outgoing
1263 outgoing = pushop.outgoing
1261 # TODO: get bundlecaps from remote
1264 # TODO: get bundlecaps from remote
1262 bundlecaps = None
1265 bundlecaps = None
1263 # create a changegroup from local
1266 # create a changegroup from local
1264 if pushop.revs is None and not (
1267 if pushop.revs is None and not (
1265 outgoing.excluded or pushop.repo.changelog.filteredrevs
1268 outgoing.excluded or pushop.repo.changelog.filteredrevs
1266 ):
1269 ):
1267 # push everything,
1270 # push everything,
1268 # use the fast path, no race possible on push
1271 # use the fast path, no race possible on push
1269 cg = changegroup.makechangegroup(
1272 cg = changegroup.makechangegroup(
1270 pushop.repo,
1273 pushop.repo,
1271 outgoing,
1274 outgoing,
1272 b'01',
1275 b'01',
1273 b'push',
1276 b'push',
1274 fastpath=True,
1277 fastpath=True,
1275 bundlecaps=bundlecaps,
1278 bundlecaps=bundlecaps,
1276 )
1279 )
1277 else:
1280 else:
1278 cg = changegroup.makechangegroup(
1281 cg = changegroup.makechangegroup(
1279 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1282 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1280 )
1283 )
1281
1284
1282 # apply changegroup to remote
1285 # apply changegroup to remote
1283 # local repo finds heads on server, finds out what
1286 # local repo finds heads on server, finds out what
1284 # revs it must push. once revs transferred, if server
1287 # revs it must push. once revs transferred, if server
1285 # finds it has different heads (someone else won
1288 # finds it has different heads (someone else won
1286 # commit/push race), server aborts.
1289 # commit/push race), server aborts.
1287 if pushop.force:
1290 if pushop.force:
1288 remoteheads = [b'force']
1291 remoteheads = [b'force']
1289 else:
1292 else:
1290 remoteheads = pushop.remoteheads
1293 remoteheads = pushop.remoteheads
1291 # ssh: return remote's addchangegroup()
1294 # ssh: return remote's addchangegroup()
1292 # http: return remote's addchangegroup() or 0 for error
1295 # http: return remote's addchangegroup() or 0 for error
1293 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1296 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1294
1297
1295
1298
1296 def _pushsyncphase(pushop):
1299 def _pushsyncphase(pushop):
1297 """synchronise phase information locally and remotely"""
1300 """synchronise phase information locally and remotely"""
1298 cheads = pushop.commonheads
1301 cheads = pushop.commonheads
1299 # even when we don't push, exchanging phase data is useful
1302 # even when we don't push, exchanging phase data is useful
1300 remotephases = listkeys(pushop.remote, b'phases')
1303 remotephases = listkeys(pushop.remote, b'phases')
1301 if (
1304 if (
1302 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1305 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1303 and remotephases # server supports phases
1306 and remotephases # server supports phases
1304 and pushop.cgresult is None # nothing was pushed
1307 and pushop.cgresult is None # nothing was pushed
1305 and remotephases.get(b'publishing', False)
1308 and remotephases.get(b'publishing', False)
1306 ):
1309 ):
1307 # When:
1310 # When:
1308 # - this is a subrepo push
1311 # - this is a subrepo push
1309 # - and remote support phase
1312 # - and remote support phase
1310 # - and no changeset was pushed
1313 # - and no changeset was pushed
1311 # - and remote is publishing
1314 # - and remote is publishing
1312 # We may be in issue 3871 case!
1315 # We may be in issue 3871 case!
1313 # We drop the possible phase synchronisation done by
1316 # We drop the possible phase synchronisation done by
1314 # courtesy to publish changesets possibly locally draft
1317 # courtesy to publish changesets possibly locally draft
1315 # on the remote.
1318 # on the remote.
1316 remotephases = {b'publishing': b'True'}
1319 remotephases = {b'publishing': b'True'}
1317 if not remotephases: # old server or public only reply from non-publishing
1320 if not remotephases: # old server or public only reply from non-publishing
1318 _localphasemove(pushop, cheads)
1321 _localphasemove(pushop, cheads)
1319 # don't push any phase data as there is nothing to push
1322 # don't push any phase data as there is nothing to push
1320 else:
1323 else:
1321 unfi = pushop.repo.unfiltered()
1324 unfi = pushop.repo.unfiltered()
1322 to_rev = unfi.changelog.index.rev
1325 to_rev = unfi.changelog.index.rev
1323 to_node = unfi.changelog.node
1326 to_node = unfi.changelog.node
1324 cheads_revs = [to_rev(n) for n in cheads]
1327 cheads_revs = [to_rev(n) for n in cheads]
1325 pheads_revs, _dr = phases.analyze_remote_phases(
1328 pheads_revs, _dr = phases.analyze_remote_phases(
1326 pushop.repo,
1329 pushop.repo,
1327 cheads_revs,
1330 cheads_revs,
1328 remotephases,
1331 remotephases,
1329 )
1332 )
1330 pheads = [to_node(r) for r in pheads_revs]
1333 pheads = [to_node(r) for r in pheads_revs]
1331 ### Apply remote phase on local
1334 ### Apply remote phase on local
1332 if remotephases.get(b'publishing', False):
1335 if remotephases.get(b'publishing', False):
1333 _localphasemove(pushop, cheads)
1336 _localphasemove(pushop, cheads)
1334 else: # publish = False
1337 else: # publish = False
1335 _localphasemove(pushop, pheads)
1338 _localphasemove(pushop, pheads)
1336 _localphasemove(pushop, cheads, phases.draft)
1339 _localphasemove(pushop, cheads, phases.draft)
1337 ### Apply local phase on remote
1340 ### Apply local phase on remote
1338
1341
1339 if pushop.cgresult:
1342 if pushop.cgresult:
1340 if b'phases' in pushop.stepsdone:
1343 if b'phases' in pushop.stepsdone:
1341 # phases already pushed though bundle2
1344 # phases already pushed though bundle2
1342 return
1345 return
1343 outdated = pushop.outdatedphases
1346 outdated = pushop.outdatedphases
1344 else:
1347 else:
1345 outdated = pushop.fallbackoutdatedphases
1348 outdated = pushop.fallbackoutdatedphases
1346
1349
1347 pushop.stepsdone.add(b'phases')
1350 pushop.stepsdone.add(b'phases')
1348
1351
1349 # filter heads already turned public by the push
1352 # filter heads already turned public by the push
1350 outdated = [c for c in outdated if c.node() not in pheads]
1353 outdated = [c for c in outdated if c.node() not in pheads]
1351 # fallback to independent pushkey command
1354 # fallback to independent pushkey command
1352 for newremotehead in outdated:
1355 for newremotehead in outdated:
1353 with pushop.remote.commandexecutor() as e:
1356 with pushop.remote.commandexecutor() as e:
1354 r = e.callcommand(
1357 r = e.callcommand(
1355 b'pushkey',
1358 b'pushkey',
1356 {
1359 {
1357 b'namespace': b'phases',
1360 b'namespace': b'phases',
1358 b'key': newremotehead.hex(),
1361 b'key': newremotehead.hex(),
1359 b'old': b'%d' % phases.draft,
1362 b'old': b'%d' % phases.draft,
1360 b'new': b'%d' % phases.public,
1363 b'new': b'%d' % phases.public,
1361 },
1364 },
1362 ).result()
1365 ).result()
1363
1366
1364 if not r:
1367 if not r:
1365 pushop.ui.warn(
1368 pushop.ui.warn(
1366 _(b'updating %s to public failed!\n') % newremotehead
1369 _(b'updating %s to public failed!\n') % newremotehead
1367 )
1370 )
1368
1371
1369
1372
1370 def _localphasemove(pushop, nodes, phase=phases.public):
1373 def _localphasemove(pushop, nodes, phase=phases.public):
1371 """move <nodes> to <phase> in the local source repo"""
1374 """move <nodes> to <phase> in the local source repo"""
1372 if pushop.trmanager:
1375 if pushop.trmanager:
1373 phases.advanceboundary(
1376 phases.advanceboundary(
1374 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1377 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1375 )
1378 )
1376 else:
1379 else:
1377 # repo is not locked, do not change any phases!
1380 # repo is not locked, do not change any phases!
1378 # Informs the user that phases should have been moved when
1381 # Informs the user that phases should have been moved when
1379 # applicable.
1382 # applicable.
1380 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1383 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1381 phasestr = phases.phasenames[phase]
1384 phasestr = phases.phasenames[phase]
1382 if actualmoves:
1385 if actualmoves:
1383 pushop.ui.status(
1386 pushop.ui.status(
1384 _(
1387 _(
1385 b'cannot lock source repo, skipping '
1388 b'cannot lock source repo, skipping '
1386 b'local %s phase update\n'
1389 b'local %s phase update\n'
1387 )
1390 )
1388 % phasestr
1391 % phasestr
1389 )
1392 )
1390
1393
1391
1394
1392 def _pushobsolete(pushop):
1395 def _pushobsolete(pushop):
1393 """utility function to push obsolete markers to a remote"""
1396 """utility function to push obsolete markers to a remote"""
1394 if b'obsmarkers' in pushop.stepsdone:
1397 if b'obsmarkers' in pushop.stepsdone:
1395 return
1398 return
1396 repo = pushop.repo
1399 repo = pushop.repo
1397 remote = pushop.remote
1400 remote = pushop.remote
1398 pushop.stepsdone.add(b'obsmarkers')
1401 pushop.stepsdone.add(b'obsmarkers')
1399 if pushop.outobsmarkers:
1402 if pushop.outobsmarkers:
1400 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1403 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1401 rslts = []
1404 rslts = []
1402 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1405 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1403 remotedata = obsolete._pushkeyescape(markers)
1406 remotedata = obsolete._pushkeyescape(markers)
1404 for key in sorted(remotedata, reverse=True):
1407 for key in sorted(remotedata, reverse=True):
1405 # reverse sort to ensure we end with dump0
1408 # reverse sort to ensure we end with dump0
1406 data = remotedata[key]
1409 data = remotedata[key]
1407 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1410 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1408 if [r for r in rslts if not r]:
1411 if [r for r in rslts if not r]:
1409 msg = _(b'failed to push some obsolete markers!\n')
1412 msg = _(b'failed to push some obsolete markers!\n')
1410 repo.ui.warn(msg)
1413 repo.ui.warn(msg)
1411
1414
1412
1415
1413 def _pushbookmark(pushop):
1416 def _pushbookmark(pushop):
1414 """Update bookmark position on remote"""
1417 """Update bookmark position on remote"""
1415 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1418 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1416 return
1419 return
1417 pushop.stepsdone.add(b'bookmarks')
1420 pushop.stepsdone.add(b'bookmarks')
1418 ui = pushop.ui
1421 ui = pushop.ui
1419 remote = pushop.remote
1422 remote = pushop.remote
1420
1423
1421 for b, old, new in pushop.outbookmarks:
1424 for b, old, new in pushop.outbookmarks:
1422 action = b'update'
1425 action = b'update'
1423 if not old:
1426 if not old:
1424 action = b'export'
1427 action = b'export'
1425 elif not new:
1428 elif not new:
1426 action = b'delete'
1429 action = b'delete'
1427
1430
1428 with remote.commandexecutor() as e:
1431 with remote.commandexecutor() as e:
1429 r = e.callcommand(
1432 r = e.callcommand(
1430 b'pushkey',
1433 b'pushkey',
1431 {
1434 {
1432 b'namespace': b'bookmarks',
1435 b'namespace': b'bookmarks',
1433 b'key': b,
1436 b'key': b,
1434 b'old': hex(old),
1437 b'old': hex(old),
1435 b'new': hex(new),
1438 b'new': hex(new),
1436 },
1439 },
1437 ).result()
1440 ).result()
1438
1441
1439 if r:
1442 if r:
1440 ui.status(bookmsgmap[action][0] % b)
1443 ui.status(bookmsgmap[action][0] % b)
1441 else:
1444 else:
1442 ui.warn(bookmsgmap[action][1] % b)
1445 ui.warn(bookmsgmap[action][1] % b)
1443 # discovery can have set the value form invalid entry
1446 # discovery can have set the value form invalid entry
1444 if pushop.bkresult is not None:
1447 if pushop.bkresult is not None:
1445 pushop.bkresult = 1
1448 pushop.bkresult = 1
1446
1449
1447
1450
1448 class pulloperation:
1451 class pulloperation:
1449 """A object that represent a single pull operation
1452 """A object that represent a single pull operation
1450
1453
1451 It purpose is to carry pull related state and very common operation.
1454 It purpose is to carry pull related state and very common operation.
1452
1455
1453 A new should be created at the beginning of each pull and discarded
1456 A new should be created at the beginning of each pull and discarded
1454 afterward.
1457 afterward.
1455 """
1458 """
1456
1459
1457 def __init__(
1460 def __init__(
1458 self,
1461 self,
1459 repo,
1462 repo,
1460 remote,
1463 remote,
1461 heads=None,
1464 heads=None,
1462 force=False,
1465 force=False,
1463 bookmarks=(),
1466 bookmarks=(),
1464 remotebookmarks=None,
1467 remotebookmarks=None,
1465 streamclonerequested=None,
1468 streamclonerequested=None,
1466 includepats=None,
1469 includepats=None,
1467 excludepats=None,
1470 excludepats=None,
1468 depth=None,
1471 depth=None,
1469 path=None,
1472 path=None,
1470 ):
1473 ):
1471 # repo we pull into
1474 # repo we pull into
1472 self.repo = repo
1475 self.repo = repo
1473 # repo we pull from
1476 # repo we pull from
1474 self.remote = remote
1477 self.remote = remote
1475 # path object used to build this remote
1478 # path object used to build this remote
1476 #
1479 #
1477 # Ideally, the remote peer would carry that directly.
1480 # Ideally, the remote peer would carry that directly.
1478 self.remote_path = path
1481 self.remote_path = path
1479 # revision we try to pull (None is "all")
1482 # revision we try to pull (None is "all")
1480 self.heads = heads
1483 self.heads = heads
1481 # bookmark pulled explicitly
1484 # bookmark pulled explicitly
1482 self.explicitbookmarks = [
1485 self.explicitbookmarks = [
1483 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1486 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1484 ]
1487 ]
1485 # do we force pull?
1488 # do we force pull?
1486 self.force = force
1489 self.force = force
1487 # whether a streaming clone was requested
1490 # whether a streaming clone was requested
1488 self.streamclonerequested = streamclonerequested
1491 self.streamclonerequested = streamclonerequested
1489 # transaction manager
1492 # transaction manager
1490 self.trmanager = None
1493 self.trmanager = None
1491 # set of common changeset between local and remote before pull
1494 # set of common changeset between local and remote before pull
1492 self.common = None
1495 self.common = None
1493 # set of pulled head
1496 # set of pulled head
1494 self.rheads = None
1497 self.rheads = None
1495 # list of missing changeset to fetch remotely
1498 # list of missing changeset to fetch remotely
1496 self.fetch = None
1499 self.fetch = None
1497 # remote bookmarks data
1500 # remote bookmarks data
1498 self.remotebookmarks = remotebookmarks
1501 self.remotebookmarks = remotebookmarks
1499 # result of changegroup pulling (used as return code by pull)
1502 # result of changegroup pulling (used as return code by pull)
1500 self.cgresult = None
1503 self.cgresult = None
1501 # list of step already done
1504 # list of step already done
1502 self.stepsdone = set()
1505 self.stepsdone = set()
1503 # Whether we attempted a clone from pre-generated bundles.
1506 # Whether we attempted a clone from pre-generated bundles.
1504 self.clonebundleattempted = False
1507 self.clonebundleattempted = False
1505 # Set of file patterns to include.
1508 # Set of file patterns to include.
1506 self.includepats = includepats
1509 self.includepats = includepats
1507 # Set of file patterns to exclude.
1510 # Set of file patterns to exclude.
1508 self.excludepats = excludepats
1511 self.excludepats = excludepats
1509 # Number of ancestor changesets to pull from each pulled head.
1512 # Number of ancestor changesets to pull from each pulled head.
1510 self.depth = depth
1513 self.depth = depth
1511
1514
1512 @util.propertycache
1515 @util.propertycache
1513 def pulledsubset(self):
1516 def pulledsubset(self):
1514 """heads of the set of changeset target by the pull"""
1517 """heads of the set of changeset target by the pull"""
1515 # compute target subset
1518 # compute target subset
1516 if self.heads is None:
1519 if self.heads is None:
1517 # We pulled every thing possible
1520 # We pulled every thing possible
1518 # sync on everything common
1521 # sync on everything common
1519 c = set(self.common)
1522 c = set(self.common)
1520 ret = list(self.common)
1523 ret = list(self.common)
1521 for n in self.rheads:
1524 for n in self.rheads:
1522 if n not in c:
1525 if n not in c:
1523 ret.append(n)
1526 ret.append(n)
1524 return ret
1527 return ret
1525 else:
1528 else:
1526 # We pulled a specific subset
1529 # We pulled a specific subset
1527 # sync on this subset
1530 # sync on this subset
1528 return self.heads
1531 return self.heads
1529
1532
1530 @util.propertycache
1533 @util.propertycache
1531 def canusebundle2(self):
1534 def canusebundle2(self):
1532 return not _forcebundle1(self)
1535 return not _forcebundle1(self)
1533
1536
1534 @util.propertycache
1537 @util.propertycache
1535 def remotebundle2caps(self):
1538 def remotebundle2caps(self):
1536 return bundle2.bundle2caps(self.remote)
1539 return bundle2.bundle2caps(self.remote)
1537
1540
1538 def gettransaction(self):
1541 def gettransaction(self):
1539 # deprecated; talk to trmanager directly
1542 # deprecated; talk to trmanager directly
1540 return self.trmanager.transaction()
1543 return self.trmanager.transaction()
1541
1544
1542
1545
class transactionmanager(util.transactional):
    """Manage the life cycle of a single exchange transaction.

    The transaction is created lazily, on first use, and the appropriate
    hooks fire when it is closed.
    """

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        # The underlying transaction; created on demand by transaction().
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1573
1576
1574
1577
def listkeys(remote, namespace):
    """Fetch the pushkey ``namespace`` mapping from the ``remote`` peer."""
    with remote.commandexecutor() as executor:
        request = executor.callcommand(b'listkeys', {b'namespace': namespace})
        return request.result()
1578
1581
1579
1582
def _fullpullbundle2(repo, pullop):
    """Repeatedly pull bundle2 data until the pull is complete.

    The server may send a partial reply, i.e. when inlining pre-computed
    bundles.  In that case, update the common set based on the results and
    pull another bundle.

    Two indicators signal that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view, as obsolescence markers
    can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        ctxs = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in ctxs}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        ctxs = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in ctxs}

    while True:
        previous_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # Nothing was added: we are done.
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # Every remote head is now known locally: we are done.
            break
        # Partial reply: fold the newly received heads into the common set
        # and go around again for the remainder.
        new_heads = headsofdiff(unficl.heads(), previous_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1620
1623
1621
1624
def add_confirm_callback(repo, pullop):
    """Register a transaction validator that shows pull stats and asks the
    user to confirm before the transaction is committed."""
    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    # Keep only a weak reference so the callback does not pin the repo alive.
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        # promptchoice() returns a non-zero index for "No".
        if repo.ui.promptchoice(cm):
            raise error.Abort(b"user aborted")

    tr.addvalidator(b'900-pull-prompt', prompt)
1639
1642
1640
1643
def pull(
    repo,
    remote,
    path=None,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats is not None or excludepats is not None:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        path=path,
        heads=heads,
        force=force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # Refuse to pull from a local peer whose requirements we cannot honor.
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    for category in repo._wanted_sidedata:
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            # This should never happen since repos are supposed to be able to
            # generate the sidedata they require.
            raise error.ProgrammingError(
                _(
                    b'sidedata category requested by local side without local'
                    b"support: '%s'"
                )
                % pycompat.bytestr(category)
            )

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    # Bookmarks stored outside the store need the working-copy lock too.
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1766
1769
1767
1770
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""

    def dec(func):
        # A step may only be registered once; wrapping goes through the
        # mapping directly instead.
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec
1794
1797
1795
1798
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order."""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1801
1804
1802
1805
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # Bookmarks were already provided by the caller.
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1817
1820
1818
1821
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote heads is filtered locally, put in back in common.
        #
        # This is a hackish solution to catch most of "common but locally
        # hidden situation". We do not performs discovery on unfiltered
        # repository because it end up doing a pathological amount of round
        # trip for w huge amount of changeset we do not care about.
        #
        # If a set of such "common but filtered" changeset exist on the server
        # but are not including a remote heads, we'll not be able to detect it,
        scommon = set(common)
        for n in rheads:
            if has_node(n) and n not in scommon:
                common.append(n)
        if set(rheads).issubset(set(common)):
            # Every remote head is already common: nothing left to fetch.
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1850
1853
1851
1854
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        # Stream clone replaces both the changegroup and phase steps.
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            # Prefer the binary phase part over the legacy pushkey exchange.
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [pullop.repo.nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    # Let extensions adjust the request before it is sent.
    _pullbundle2extraprepare(pullop, kwargs)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
    if remote_sidedata:
        kwargs[b'remote_sidedata'] = remote_sidedata

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    try:
        op = bundle2.bundleoperation(
            pullop.repo,
            pullop.gettransaction,
            source=b'pull',
            remote=pullop.remote,
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(
            pullop.repo,
            bundle,
            op=op,
            remote=pullop.remote,
        )
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
        raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.RemoteError(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
1999
2002
2000
2003
2001 def _pullbundle2extraprepare(pullop, kwargs):
2004 def _pullbundle2extraprepare(pullop, kwargs):
2002 """hook function so that extensions can extend the getbundle call"""
2005 """hook function so that extensions can extend the getbundle call"""
2003
2006
2004
2007
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Fetches the missing changesets from the remote — picking the most
    capable wire-protocol command available — applies the resulting
    bundle locally, and records the changegroup result on ``pullop``.
    """
    # The transaction is opened as late as possible so we never create one
    # for nothing (doing so would also break future useful rollback calls).
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    txn = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cgdata = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        # legacy fallback: full changegroup by wanted nodes
        with pullop.remote.commandexecutor() as executor:
            cgdata = executor.callcommand(
                b'changegroup',
                {b'nodes': pullop.fetch, b'source': b'pull'},
            ).result()
    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        # legacy fallback: partial pull via changegroupsubset
        with pullop.remote.commandexecutor() as executor:
            cgdata = executor.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo,
        cgdata,
        txn,
        b'pull',
        pullop.remote.url(),
        remote=pullop.remote,
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2067
2070
2068
2071
def _pullphase(pullop):
    """fetch phase information from the remote and apply it locally"""
    if b'phases' in pullop.stepsdone:
        # phases were already handled (e.g. inside the bundle2 reply)
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)
2075
2078
2076
2079
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    unfi = pullop.repo.unfiltered()
    if remotephases and not publishing:
        # The remote speaks phases and is non-publishing: analyze which of
        # the pulled changesets it reports as public.
        to_rev = unfi.changelog.index.rev
        to_node = unfi.changelog.node
        pulledsubset_revs = [to_rev(n) for n in pullop.pulledsubset]
        pheads_revs, _dr = phases.analyze_remote_phases(
            pullop.repo,
            pulledsubset_revs,
            remotephases,
        )
        public_heads = [to_node(r) for r in pheads_revs]
        draft_heads = pullop.pulledsubset
    else:
        # Remote is old or publishing: all common changesets should be
        # seen as public.
        public_heads = pullop.pulledsubset
        draft_heads = []
    phase_of = unfi._phasecache.phase
    to_rev = unfi.changelog.index.get_rev

    # advance to public every head not already public locally
    public_heads = [
        pn for pn in public_heads if phase_of(unfi, to_rev(pn)) > phases.public
    ]
    if public_heads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.public, public_heads)

    # advance to draft every head not already draft (or public) locally
    draft_heads = [
        pn for pn in draft_heads if phase_of(unfi, to_rev(pn)) > phases.draft
    ]
    if draft_heads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, phases.draft, draft_heads)
2118
2121
2119
2122
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    # the bookmark handling mode may be constrained by the path definition
    mode = None
    if pullop.remote_path is not None:
        mode = pullop.remote_path.bookmarks_mode
    bookmod.updatefromremote(
        repo.ui,
        repo,
        pullop.remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
        mode=mode,
    )
2139
2142
2140
2143
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        # b'dump0' is the first chunk; its absence means no markers at all
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            # iterate the b'dumpN' keys in reverse-sorted order
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    # the stream version is not needed here; discard it
                    _version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
2168
2171
2169
2172
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    def _as_path_patterns(patterns):
        # b'*' means "everything": translate it to the root path pattern
        return [b'path:.' if p == b'*' else b'path:' + p for p in patterns]

    user_includes = _as_path_patterns(user_includes)
    user_excludes = _as_path_patterns(user_excludes)

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    # restrict what the request asked for to what the ACL allows
    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = dict(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2224
2227
2225
2228
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: Set of revs the client already has as full (non-ellipsis) nodes.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    # nullrev is added so roots of the DAG are handled uniformly below
    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) from the nearest head, seeded at 0
        revdepth = {h: 0 for h in headsrevs}

    # rev -> set of ellipsis heads reachable from it without passing a
    # needed (full) node
    ellipsisheads = collections.defaultdict(set)
    # ellipsis head rev -> set of its (at most 2) synthetic parents
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move `roots` from `head` onto the intermediate `child` node
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # find an elided merge commit between two of the three roots that
        # can absorb them as its own roots; returns (child, roots_to_move)
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    # walk from heads towards roots (descending revs)
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: this rebinds `clrev` (previously cl.rev) to a
        # changelogrevision object; the function alias is no longer needed.
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            # propagate depth to parents, capped at depth + 1 (too deep)
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            # full node: it becomes a root of every ellipsis head that
            # reached it, and its parents become required
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # required but uninteresting: emit as an ellipsis node
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # skipped entirely: parents inherit this rev's ellipsis heads
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2357
2360
2358
2361
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2365
2368
2366
2369
# Ordered list of step names used when building a bundle2 for getbundle;
# the order in which steps run matters.
getbundle2partsorder = []

# Step name -> part-generator function.
#
# Kept as a module-level dict so that extensions can wrap individual steps.
getbundle2partsmapping = {}
2374
2377
2375
2378
def getbundle2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generator for getbundle

    The decorated function is recorded in the step -> function mapping and
    its step name is appended to (or, when ``idx`` is given, inserted into)
    the ordered list of steps. Decoration order therefore matters.

    Only use this decorator for new steps; to wrap an existing step from an
    extension, modify the getbundle2partsmapping dictionary directly.
    """

    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return register
2396
2399
2397
2400
def bundle2requested(bundlecaps):
    """tell whether the given client capabilities ask for a bundle2 stream"""
    if bundlecaps is None:
        return False
    return any(cap.startswith(b'HG2') for cap in bundlecaps)
2402
2405
2403
2406
def getbundlechunks(
    repo,
    source,
    heads=None,
    common=None,
    bundlecaps=None,
    remote_sidedata=None,
    **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    if not bundle2requested(bundlecaps):
        # legacy bundle10 path: only a bare changegroup can be produced
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )
        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        stream = changegroup.makestream(
            repo,
            outgoing,
            b'01',
            source,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
        )
        return info, stream

    # bundle20 path
    info[b'bundleversion'] = 2
    b2caps = {}
    for cap in bundlecaps:
        if cap.startswith(b'bundle2='):
            blob = urlreq.unquote(cap[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator, in registration order
    for step in getbundle2partsorder:
        generator = getbundle2partsmapping[step]
        generator(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            remote_sidedata=remote_sidedata,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2477
2480
2478
2481
@getbundle2partsgenerator(b'stream')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """add a stream-clone part to the requested bundle, when applicable"""
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2482
2485
2483
2486
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    remote_sidedata=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    # skip when the client explicitly opted out of changegroup data, or when
    # it advertised no bundle2 capabilities at all
    if not kwargs.get('cg', True) or not b2caps:
        return

    # default to the oldest changegroup format; upgrade to the newest version
    # both sides support when the client advertises some
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # nothing to transfer
        return

    if kwargs.get('narrow', False):
        # NOTE: `include` and `exclude` are only bound on this branch; the
        # ACL check at the end of this function re-tests the same 'narrow'
        # flag first (short-circuit) so they are never read unbound
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo,
        outgoing,
        version,
        source,
        bundlecaps=bundlecaps,
        matcher=matcher,
        remote_sidedata=remote_sidedata,
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    # advisory parameter: the number of changesets in the stream (the
    # receiver may ignore it)
    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if scmutil.istreemanifest(repo):
        part.addparam(b'treemanifest', b'1')

    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
        part.addparam(b'exp-sidedata', b'1')
        sidedata = bundle2.format_remote_wanted_sidedata(repo)
        part.addparam(b'exp-wanted-sidedata', sidedata)

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2558
2561
2559
2562
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        # the client did not ask for bookmark data
        return
    caps_ok = b2caps and b'bookmarks' in b2caps
    if not caps_ok:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    # encode the full binary bookmark listing; an empty payload means there
    # are no bookmarks, in which case no part is emitted at all
    encoded = bookmod.binaryencode(repo, bookmod.listbinbookmarks(repo))
    if encoded:
        bundler.newpart(b'bookmarks', data=encoded)
2573
2576
2574
2577
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    # one part per requested pushkey namespace, each carrying the encoded
    # key/value pairs for that namespace
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2586
2589
2587
2590
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # restrict to markers relevant to the ancestors of the exchanged heads
    ancestors = [ctx.node() for ctx in repo.set(b'::%ln', heads)]
    relevant = obsutil.sortedmarkers(repo.obsstore.relevantmarkers(ancestors))
    bundle2.buildobsmarkerspart(bundler, relevant)
2600
2603
2601
2604
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if not b2caps or b'heads' not in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        # map phase -> set of head nodes to announce for that phase
        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # a publishing repository advertises everything as public
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        # (NOTE: the comprehension variable `phase` shadows the `phase`
        # helper bound in the non-publishing branch above; harmless here)
        phasemapping = {
            phase: sorted(headsbyphase[phase]) for phase in phases.allphases
        }

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2649
2652
2650
2653
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged AND the client
    # advertised support for this part.
    sending_cg = kwargs.get('cg', True)
    client_supports = bool(b2caps) and b'hgtagsfnodes' in b2caps
    if not (sending_cg and client_supports):
        return

    bundle2.addparttagsfnodescache(
        repo, bundler, _computeoutgoing(repo, heads, common)
    )
2677
2680
2678
2681
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changeset are being exchanged,
    # - the client supports it.
    # - narrow bundle isn't in play (not currently compatible).
    if not kwargs.get('cg', True):
        return
    if not b2caps or b'rev-branch-cache' not in b2caps:
        return
    if kwargs.get('narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2715
2718
2716
2719
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = hashutil.sha1(b''.join(sorted(current))).digest()
    # the remote may identify the expected heads literally, by their sha1
    # digest, or bypass the check entirely with the b'force' marker
    acceptable = (
        their_heads == [b'force']
        or their_heads == current
        or their_heads == [b'hashed', digest]
    )
    if not acceptable:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2734
2737
2735
2738
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, urlutil.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                # transaction (and the needed locks) are created lazily so
                # that read-only bundle2 exchanges avoid taking the locks
                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # buffer all further ui output so it can be shipped
                        # back to the client inside the reply bundle
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # annotate the exception so upper layers know it happened
                # during bundle2 processing, and salvage any captured output
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2823
2826
2824
2827
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # skip when specific heads were requested for this pull
    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = bundlecaches.parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = bundlecaches.filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)

    # after sorting, the first entry is the preferred one
    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url, remote):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2905
2908
2906
2909
def inline_clone_bundle_open(ui, url, peer):
    """Return a file-like object streaming an inline (peer-served) clone bundle.

    ``url`` uses the inline clone-bundle scheme; the identifier after the
    scheme prefix is resolved through the peer's cached-bundle facility.

    Raises ``error.Abort`` when no peer is available to serve the bundle.
    """
    if not peer:
        # Fix: translate the message first, then interpolate the URL.
        # The previous code formatted inside _() — _(b'... %s' % url) —
        # which defeats gettext catalog lookup of the message.
        raise error.Abort(_(b'no remote repository supplied for %s') % url)
    clonebundleid = url[len(bundlecaches.CLONEBUNDLESCHEME) :]
    peerclonebundle = peer.get_cached_bundle_inline(clonebundleid)
    return util.chunkbuffer(peerclonebundle)
2913
2916
2914
2917
def trypullbundlefromurl(ui, repo, url, peer):
    """Attempt to apply a bundle from a URL.

    Returns True when the bundle was fetched and applied.  HTTP and URL
    errors are reported to the user and swallowed (returning False) so the
    caller can decide whether to fall back to a regular clone.
    """
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            # inline clone bundles are served by the peer itself; anything
            # else is fetched over the network
            if url.startswith(bundlecaches.CLONEBUNDLESCHEME):
                fh = inline_clone_bundle_open(ui, url, peer)
            else:
                fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False
General Comments 0
You need to be logged in to leave comments. Login now