##// END OF EJS Templates
bundlespec: fix the generation of bundlespec for `cg.version`...
marmoute -
r50229:6d15a897 default
parent child Browse files
Show More
@@ -1,2836 +1,2853
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import weakref
10 import weakref
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullrev,
15 nullrev,
16 )
16 )
17 from . import (
17 from . import (
18 bookmarks as bookmod,
18 bookmarks as bookmod,
19 bundle2,
19 bundle2,
20 bundlecaches,
20 bundlecaches,
21 changegroup,
21 changegroup,
22 discovery,
22 discovery,
23 error,
23 error,
24 lock as lockmod,
24 lock as lockmod,
25 logexchange,
25 logexchange,
26 narrowspec,
26 narrowspec,
27 obsolete,
27 obsolete,
28 obsutil,
28 obsutil,
29 phases,
29 phases,
30 pushkey,
30 pushkey,
31 pycompat,
31 pycompat,
32 requirements,
32 requirements,
33 scmutil,
33 scmutil,
34 streamclone,
34 streamclone,
35 url as urlmod,
35 url as urlmod,
36 util,
36 util,
37 wireprototypes,
37 wireprototypes,
38 )
38 )
39 from .utils import (
39 from .utils import (
40 hashutil,
40 hashutil,
41 stringutil,
41 stringutil,
42 urlutil,
42 urlutil,
43 )
43 )
44 from .interfaces import repository
44 from .interfaces import repository
45
45
46 urlerr = util.urlerr
46 urlerr = util.urlerr
47 urlreq = util.urlreq
47 urlreq = util.urlreq
48
48
49 _NARROWACL_SECTION = b'narrowacl'
49 _NARROWACL_SECTION = b'narrowacl'
50
50
51
51
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler/applier object for the bundle read from ``fh``.

    The first 4 bytes of the stream are read as the bundle header and
    dispatched on:

    - ``HG10``: changegroup v1 (a 2-byte compression code follows, unless
      the stream was headerless, in which case ``UN`` is assumed)
    - ``HG2x``: bundle2, handed to ``bundle2.getunbundler``
    - ``HGS1``: stream-clone bundle

    ``fname`` is only used for error messages (and joined through ``vfs``
    when one is given); when empty, ``b"stream"`` is used instead.

    Raises ``error.Abort`` for non-``HG`` magic or an unknown version.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        # A headerless stream (first byte NUL) is a raw, uncompressed
        # changegroup: re-prepend the consumed bytes and treat it as HG10/UN.
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if alg is None:
            # compression algorithm code follows the HG10 header
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )
81
81
82
82
def _format_params(params):
    """Render a parameter mapping as a ``key=value;key=value`` bytestring.

    Keys are emitted in sorted order and each value is percent-quoted so the
    result can be appended verbatim to a bundlespec string.
    """
    return b';'.join(
        b"%s=%s" % (key, urlreq.quote(value))
        for key, value in sorted(params.items())
    )
89
90
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bytestring such as ``b'gzip-v1'``, ``b'zstd-v2'``,
    ``b'none-v2;stream=v2;...'`` or ``b'none-packed1;...'`` depending on the
    bundle flavor found in ``fh``.  Raises ``error.Abort`` when the bundle
    type, compression or changegroup version cannot be mapped to a known
    bundlespec.
    """

    def speccompression(alg):
        # map an internal compression engine name to its bundlespec name,
        # or None when the engine is unknown
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    # extra bundlespec parameters (e.g. cg.version) accumulated below
    params = {}

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % comp
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                cgversion = part.params[b'version']
                if cgversion in (b'01', b'02'):
                    version = b'v2'
                elif cgversion in (b'03',):
                    version = b'v2'
                    # cg3 needs to be advertised explicitly in the spec
                    params[b'cg.version'] = cgversion
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        # BUGFIX: report the offending changegroup version,
                        # not `version` (which is still None/stale here)
                        % cgversion,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                requirements_spec = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % requirements_spec

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )
        spec = b'%s-%s' % (comp, version)
        if params:
            spec += b';'
            spec += _format_params(params)
        return spec

    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
149
166
150
167
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        # nothing known in common: anchor on the null revision
        common = [repo.nullid]
    else:
        # drop common nodes the local changelog does not actually have
        known = cl.hasnode
        common = [node for node in common if known(node)]
    heads = heads or cl.heads()
    return discovery.outgoing(repo, common, heads)
169
186
170
187
def _checkpublish(pushop):
    """Enforce the ``experimental.auto-publish`` policy before a push.

    When pushing to a publishing server would turn local draft/secret
    changesets public, warn, prompt for confirmation, or abort according to
    the configured behavior.  No-op when ``--publish`` was given or the
    behavior is not one of ``warn``/``confirm``/``abort``.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    # only relevant when the remote actually publishes on push
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
        # we want to use pushop.revs in the revset even if they themselves are
        # secret, but we don't want to have anything that the server won't see
        # in the result of this expression
        published &= repo.filtered(b'served')
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.CanceledError(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)
207
224
208
225
209 def _forcebundle1(op):
226 def _forcebundle1(op):
210 """return true if a pull/push must use bundle1
227 """return true if a pull/push must use bundle1
211
228
212 This function is used to allow testing of the older bundle version"""
229 This function is used to allow testing of the older bundle version"""
213 ui = op.repo.ui
230 ui = op.repo.ui
214 # The goal is this config is to allow developer to choose the bundle
231 # The goal is this config is to allow developer to choose the bundle
215 # version used during exchanged. This is especially handy during test.
232 # version used during exchanged. This is especially handy during test.
216 # Value is a list of bundle version to be picked from, highest version
233 # Value is a list of bundle version to be picked from, highest version
217 # should be used.
234 # should be used.
218 #
235 #
219 # developer config: devel.legacy.exchange
236 # developer config: devel.legacy.exchange
220 exchange = ui.configlist(b'devel', b'legacy.exchange')
237 exchange = ui.configlist(b'devel', b'legacy.exchange')
221 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
238 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
222 return forcebundle1 or not op.remote.capable(b'bundle2')
239 return forcebundle1 or not op.remote.capable(b'bundle2')
223
240
224
241
class pushoperation:
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.ancestorsof

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::ancestorsof and ::commonheads)
        # (ancestorsof is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (ancestorsof and ::commonheads)
        #              + (commonheads and ::ancestorsof))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::ancestorsof) - commonheads)
        #
        # We can pick:
        # * ancestorsof part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
355
372
356
373
# mapping of message used when pushing bookmark:
# action name -> (success message template, failure message template)
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed\n'),
    ),
}
372
389
373
390
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    """Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()

    NOTE(review): despite the docstring above, this function returns the
    ``pushoperation`` object itself; the integer result is carried in
    ``pushop.cgresult``.
    """
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    # pre-flight checks against the destination capabilities
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )
    for category in sorted(bundle2.read_remote_wanted_sidedata(pushop.remote)):
        # Check that a computer is registered for that category for at least
        # one revlog kind.
        for kind, computers in repo._sidedata_computers.items():
            if computers.get(category):
                break
        else:
            raise error.Abort(
                _(
                    b'cannot push: required sidedata category not supported'
                    b" by this client: '%s'"
                )
                % pycompat.bytestr(category)
            )
    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    # run the push steps; each helper consults pushop.stepsdone so bundle2
    # and legacy paths do not repeat work
    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not pushop.force:
                    _checksubrepostate(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
484
501
485
502
486 # list of steps to perform discovery before push
503 # list of steps to perform discovery before push
487 pushdiscoveryorder = []
504 pushdiscoveryorder = []
488
505
489 # Mapping between step name and function
506 # Mapping between step name and function
490 #
507 #
491 # This exists to help extensions wrap steps if necessary
508 # This exists to help extensions wrap steps if necessary
492 pushdiscoverymapping = {}
509 pushdiscoverymapping = {}
493
510
494
511
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""

    def register(func):
        # each step name may only ever be registered once
        assert stepname not in pushdiscoverymapping
        pushdiscoveryorder.append(stepname)
        pushdiscoverymapping[stepname] = func
        return func

    return register
512
529
513
530
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    # execute every registered step, in registration order
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
519
536
520
537
def _checksubrepostate(pushop):
    """Ensure all outgoing referenced subrepo revisions are present locally"""

    repo = pushop.repo

    # If the repository does not use subrepos, skip the expensive
    # manifest checks.
    if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
        return

    for n in pushop.outgoing.missing:
        ctx = repo[n]

        # only check changesets that actually touch the subrepo state
        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                # verify() raises when the referenced subrepo rev is absent
                sub.verify(onpush=True)
538
555
539
556
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    # First figure out what the remote already has in common with us...
    if pushop.revs:
        commoninc = discovery.findcommonincoming(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = discovery.findcommonincoming(
            pushop.repo, pushop.remote, force=pushop.force
        )
    common, inc, remoteheads = commoninc
    # ...then derive the outgoing set from that common base.
    pushop.outgoing = discovery.findcommonoutgoing(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
565
582
566
583
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    # remote phase information from the "phases" pushkey namespace; an
    # empty mapping means the server has no phase support
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    # summary of the remote phase situation restricted to the heads we
    # would end up with if the changeset push fails (the fallback heads)
    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        # non-publishing remote: only locally-public changesets may turn
        # remote-draft changesets public
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        # explicit `--publish` push: everything reachable that is not yet
        # public (or descends from a remote draft root) gets outdated
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        # nothing pushed: the success and failure cases coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    # phases to update on success / on failure of the changeset push
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
628
645
629
646
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """collect the obsolescence markers relevant to the pushed set"""
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return

    if not repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    # Very naive computation that can be quite expensive on a big
    # repository; however, evolution is currently slow on those anyway.
    ancestors = repo.set(b'::%ln', pushop.futureheads)
    nodes = (ctx.node() for ctx in ancestors)
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
646
663
647
664
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """figure out which bookmarks need to be pushed"""
    ui = pushop.ui
    unfi = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")

    # when specific revisions are pushed, automatic bookmark moves are
    # restricted to the ancestors of those revisions
    pushedrevs = ()
    if pushop.revs:
        revnums = pycompat.maplist(unfi.changelog.rev, pushop.revs)
        pushedrevs = unfi.changelog.ancestors(revnums, inclusive=True)

    remotebms = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    # bookmarks explicitly requested by the user (names expanded)
    explicit = {
        unfi._bookmarks.expandname(mark) for mark in pushop.bookmarks
    }

    comparison = bookmod.comparebookmarks(unfi, unfi._bookmarks, remotebms)
    return _processcompared(pushop, pushedrevs, explicit, remotebms, comparison)
667
684
668
685
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decision on bookmarks to push to the remote repo

    Exists to help extensions alter this behavior.

    ``pushed`` is the set of pushed revs (or empty when no specific revs
    were requested), ``explicit`` the set of bookmark names explicitly
    requested on the command line, ``comp`` the 8-tuple produced by
    bookmod.comparebookmarks().
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    # bookmarks that advanced locally are pushed automatically, as long
    # as their target changeset is part of the pushed set
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        # new bookmarks are only pushed when explicitly requested
        if b in explicit:
            explicit.remove(b)
            if bookmod.isdivergent(b):
                pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
                pushop.bkresult = 2
            else:
                pushop.outbookmarks.append((b, b'', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        # overwriting the remote value requires an explicit request
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        # deletion requires an explicit request too
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        # some explicitly requested bookmarks matched nothing at all
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
721
738
722
739
def _pushcheckoutgoing(pushop):
    """validate the outgoing set; return True when there is something to push"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force and unfi.obsstore:
        # when repo.obsstore is falsy there can be no obsolete changeset,
        # so the whole iteration is skipped
        obsoletemsg = _(b"push includes obsolete changeset: %s!")
        unstablemsgs = {
            b"orphan": _(b"push includes orphan changeset: %s!"),
            b"phase-divergent": _(
                b"push includes phase-divergent changeset: %s!"
            ),
            b"content-divergent": _(
                b"push includes content-divergent changeset: %s!"
            ),
        }
        # If at least one obsolete or unstable changeset is part of the
        # missing set, then at least one of the missing heads is obsolete
        # or unstable itself, so checking only heads is enough.
        for node in outgoing.ancestorsof:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(obsoletemsg % ctx)
            elif ctx.isunstable():
                # TODO print more than one instability in the abort
                # message
                raise error.Abort(unstablemsgs[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
759
776
760
777
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator registering a function that generates a bundle2 part

    The decorated function is recorded in the step -> function mapping and
    inserted into the ordered list of steps: appended at the end, or at
    position ``idx`` when one is given. Beware that decorated functions
    are registered in definition order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a
    step from an extension, update the b2partsgenmapping dictionary
    directly."""

    def register(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return register
790
807
791
808
792 def _pushb2ctxcheckheads(pushop, bundler):
809 def _pushb2ctxcheckheads(pushop, bundler):
793 """Generate race condition checking parts
810 """Generate race condition checking parts
794
811
795 Exists as an independent function to aid extensions
812 Exists as an independent function to aid extensions
796 """
813 """
797 # * 'force' do not check for push race,
814 # * 'force' do not check for push race,
798 # * if we don't push anything, there are nothing to check.
815 # * if we don't push anything, there are nothing to check.
799 if not pushop.force and pushop.outgoing.ancestorsof:
816 if not pushop.force and pushop.outgoing.ancestorsof:
800 allowunrelated = b'related' in bundler.capabilities.get(
817 allowunrelated = b'related' in bundler.capabilities.get(
801 b'checkheads', ()
818 b'checkheads', ()
802 )
819 )
803 emptyremote = pushop.pushbranchmap is None
820 emptyremote = pushop.pushbranchmap is None
804 if not allowunrelated or emptyremote:
821 if not allowunrelated or emptyremote:
805 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
822 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
806 else:
823 else:
807 affected = set()
824 affected = set()
808 for branch, heads in pushop.pushbranchmap.items():
825 for branch, heads in pushop.pushbranchmap.items():
809 remoteheads, newheads, unsyncedheads, discardedheads = heads
826 remoteheads, newheads, unsyncedheads, discardedheads = heads
810 if remoteheads is not None:
827 if remoteheads is not None:
811 remote = set(remoteheads)
828 remote = set(remoteheads)
812 affected |= set(discardedheads) & remote
829 affected |= set(discardedheads) & remote
813 affected |= remote - set(newheads)
830 affected |= remote - set(newheads)
814 if affected:
831 if affected:
815 data = iter(sorted(affected))
832 data = iter(sorted(affected))
816 bundler.newpart(b'check:updated-heads', data=data)
833 bundler.newpart(b'check:updated-heads', data=data)
817
834
818
835
819 def _pushing(pushop):
836 def _pushing(pushop):
820 """return True if we are pushing anything"""
837 """return True if we are pushing anything"""
821 return bool(
838 return bool(
822 pushop.outgoing.missing
839 pushop.outgoing.missing
823 or pushop.outdatedphases
840 or pushop.outdatedphases
824 or pushop.outobsmarkers
841 or pushop.outobsmarkers
825 or pushop.outbookmarks
842 or pushop.outbookmarks
826 )
843 )
827
844
828
845
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if pushop.force or not _pushing(pushop):
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if b'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    # record the current remote value of every bookmark we move, so the
    # server can detect a race with another client
    expected = [(book, old) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(pushop.repo, expected)
    bundler.newpart(b'check:bookmarks', data=checkdata)
843
860
844
861
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if pushop.force or not _pushing(pushop):
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is None or not hasphaseheads:
        return
    # make sure the remote phases have not moved since discovery
    checks = {p: [] for p in phases.allphases}
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks.values()):
        for nodelist in checks.values():
            nodelist.sort()
        checkdata = phases.binaryencode(checks)
        bundler.newpart(b'check:phases', data=checkdata)
862
879
863
880
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    # default to changegroup version '01' when the remote advertises none
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # keep only the versions both sides can produce/consume
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    remote_sidedata = bundle2.read_remote_wanted_sidedata(pushop.remote)
    cgstream = changegroup.makestream(
        pushop.repo,
        pushop.outgoing,
        version,
        b'push',
        bundlecaps=b2caps,
        remote_sidedata=remote_sidedata,
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        # only advertise the version parameter to remotes that negotiated
        # one; legacy remotes expect a bare changegroup part
        cgpart.addparam(b'version', version)
    if scmutil.istreemanifest(pushop.repo):
        cgpart.addparam(b'treemanifest', b'1')
    if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
917
934
918
935
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # honour the devel knob forcing the legacy pushkey-based exchange
    uselegacy = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    supportsheads = b'heads' in remotecaps.get(b'phases', ())

    if supportsheads and not uselegacy:
        # modern binary phase-heads part
        return _pushb2phaseheads(pushop, bundler)
    if b'pushkey' in remotecaps:
        # fall back to one pushkey part per moved head
        return _pushb2phasespushkey(pushop, bundler)
935
952
936
953
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if not pushop.outdatedphases:
        return
    # every outdated head is to become public (phase 0) on the remote
    updates = {p: [] for p in phases.allphases}
    updates[0].extend(head.node() for head in pushop.outdatedphases)
    bundler.newpart(b'phase-heads', data=phases.binaryencode(updates))
945
962
946
963
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    # (part id, node) pairs so replies/failures can be mapped back to the
    # head whose phase was being updated
    part2node = []

    def handlefailure(pushop, exc):
        # abort with the node matching the failing part id
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    # one pushkey part per head moving from draft to public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # inspect the reply for each emitted pushkey part and warn about
        # any update the server ignored or rejected
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
982
999
983
1000
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """attach an obsolescence-markers part when the remote can accept one"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no obsmarkers format in common with the remote, skip the step
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        sorted_markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, sorted_markers)
995
1012
996
1013
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)

    # honour the devel knob forcing the legacy pushkey-based exchange
    legacyconf = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    uselegacy = b'bookmarks' in legacyconf

    if not uselegacy and b'bookmarks' in remotecaps:
        # modern binary bookmarks part
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in remotecaps:
        # fall back to one pushkey part per bookmark
        return _pushb2bookmarkspushkey(pushop, bundler)
1011
1028
1012
1029
1013 def _bmaction(old, new):
1030 def _bmaction(old, new):
1014 """small utility for bookmark pushing"""
1031 """small utility for bookmark pushing"""
1015 if not old:
1032 if not old:
1016 return b'export'
1033 return b'export'
1017 elif not new:
1034 elif not new:
1018 return b'delete'
1035 return b'delete'
1019 return b'update'
1036 return b'update'
1020
1037
1021
1038
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if not node:
        return
    if pushop.repo[node].phase() == phases.secret:
        msg = _(b'cannot push bookmark %s as it points to a secret changeset')
        raise error.Abort(msg % b)
1028
1045
1029
1046
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a dedicated bundle2 'bookmarks' part"""
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    actions = []
    entries = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        entries.append((book, new))
        actions.append((book, _bmaction(old, new)))
    payload = bookmod.binaryencode(pushop.repo, entries)
    bundler.newpart(b'bookmarks', data=payload)

    def handlereply(op):
        # reaching here means the whole bundle was accepted; report each
        # bookmark change as successful
        ui = pushop.ui
        for book, action in actions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1051
1068
1052
1069
def _pushb2bookmarkspushkey(pushop, bundler):
    # Push bookmarks as one bundle2 'pushkey' part per bookmark; fallback
    # used when the server lacks the dedicated 'bookmarks' part.
    pushop.stepsdone.add(b'bookmarks')
    # (part id, bookmark name, action) for every part we emit
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # map a failed part id back to the bookmark it carried and abort
        # with the matching failure message
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # inspect the server reply for each emitted pushkey part and report
        # per-bookmark success or failure
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply
1099
1116
1100
1117
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    # parse every KEY=VALUE (or KEY=) entry, rejecting malformed input
    shellvars = {}
    for raw in pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        name, _sep, value = raw.partition(b'=')
        shellvars[name] = value

    part = bundler.newpart(b'pushvars')
    for key, value in shellvars.items():
        part.addparam(key, value, mandatory=False)
1121
1138
1122
1139
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: allow the server to send a bundle back (requires an open
    # transaction manager and an explicit opt-in)
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    # let every registered part generator add its parts; generators may
    # return a callable to be invoked on the server reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            # process the server's reply bundle locally
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.RemoteError(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts
            pushop.ui.error(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.RemoteError(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # delegate to the failure callback registered for that part
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1180
1197
1181
1198
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        # partial push (explicit revs, excluded revs or filtered repo):
        # take the slower but race-safe path
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1228
1245
1229
1246
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            # publishing server: everything common is public locally too
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
1294
1311
1295
1312
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    would_move = any(phase < pushop.repo[n].phase() for n in nodes)
    if would_move:
        pushop.ui.status(
            _(
                b'cannot lock source repo, skipping '
                b'local %s phase update\n'
            )
            % phases.phasenames[phase]
        )
1316
1333
1317
1334
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug(b'try to push obsolete markers to remote\n')
    markers = obsutil.sortedmarkers(pushop.outobsmarkers)
    remotedata = obsolete._pushkeyescape(markers)
    # reverse sort to ensure we end with dump0
    results = [
        remote.pushkey(b'obsolete', key, b'', remotedata[key])
        for key in sorted(remotedata, reverse=True)
    ]
    if not all(results):
        msg = _(b'failed to push some obsolete markers!\n')
        repo.ui.warn(msg)
1337
1354
1338
1355
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip when the changegroup push failed or bookmarks were already
    # handled (e.g. through bundle2)
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        # one pushkey round-trip per bookmark (legacy protocol)
        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
    # discovery can have set the value from invalid entry
    if pushop.bkresult is not None:
        pushop.bkresult = 1
1372
1389
1373
1390
class pulloperation:
    """A object that represent a single pull operation

    It purpose is to carry pull related state and very common operation.

    A new should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
        path=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # path object used to build this remote
        #
        # Ideally, the remote peer would carry that directly.
        self.remote_path = path
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmark pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changeset between local and remote before pull
        self.common = None
        # set of pulled head
        self.rheads = None
        # list of missing changeset to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of step already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changeset target by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        """True unless bundle1 usage is being forced (config/compat)."""
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        """bundle2 capabilities advertised by the remote peer."""
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1467
1484
1468
1485
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # the name embeds source and (password-stripped) url for hooks
            name = b'%s\n%s' % (self.source, urlutil.hidepassword(self.url))
            tr = self.repo.transaction(name)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1499
1516
1500
1517
def listkeys(remote, namespace):
    """Return the pushkey ``namespace`` mapping advertised by ``remote``."""
    with remote.commandexecutor() as executor:
        future = executor.callcommand(b'listkeys', {b'namespace': namespace})
        return future.result()
1504
1521
1505
1522
def _fullpullbundle2(repo, pullop):
    # Pull repeatedly until the remote has nothing more for us.
    #
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing new arrived: we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally: we are done
            break
        # fold the newly received heads into the common set and retry
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1546
1563
1547
1564
1548 def add_confirm_callback(repo, pullop):
1565 def add_confirm_callback(repo, pullop):
1549 """adds a finalize callback to transaction which can be used to show stats
1566 """adds a finalize callback to transaction which can be used to show stats
1550 to user and confirm the pull before committing transaction"""
1567 to user and confirm the pull before committing transaction"""
1551
1568
1552 tr = pullop.trmanager.transaction()
1569 tr = pullop.trmanager.transaction()
1553 scmutil.registersummarycallback(
1570 scmutil.registersummarycallback(
1554 repo, tr, txnname=b'pull', as_validator=True
1571 repo, tr, txnname=b'pull', as_validator=True
1555 )
1572 )
1556 reporef = weakref.ref(repo.unfiltered())
1573 reporef = weakref.ref(repo.unfiltered())
1557
1574
1558 def prompt(tr):
1575 def prompt(tr):
1559 repo = reporef()
1576 repo = reporef()
1560 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1577 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1561 if repo.ui.promptchoice(cm):
1578 if repo.ui.promptchoice(cm):
1562 raise error.Abort(b"user aborted")
1579 raise error.Abort(b"user aborted")
1563
1580
1564 tr.addvalidator(b'900-pull-prompt', prompt)
1581 tr.addvalidator(b'900-pull-prompt', prompt)
1565
1582
1566
1583
1567 def pull(
1584 def pull(
1568 repo,
1585 repo,
1569 remote,
1586 remote,
1570 path=None,
1587 path=None,
1571 heads=None,
1588 heads=None,
1572 force=False,
1589 force=False,
1573 bookmarks=(),
1590 bookmarks=(),
1574 opargs=None,
1591 opargs=None,
1575 streamclonerequested=None,
1592 streamclonerequested=None,
1576 includepats=None,
1593 includepats=None,
1577 excludepats=None,
1594 excludepats=None,
1578 depth=None,
1595 depth=None,
1579 confirm=None,
1596 confirm=None,
1580 ):
1597 ):
1581 """Fetch repository data from a remote.
1598 """Fetch repository data from a remote.
1582
1599
1583 This is the main function used to retrieve data from a remote repository.
1600 This is the main function used to retrieve data from a remote repository.
1584
1601
1585 ``repo`` is the local repository to clone into.
1602 ``repo`` is the local repository to clone into.
1586 ``remote`` is a peer instance.
1603 ``remote`` is a peer instance.
1587 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1604 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1588 default) means to pull everything from the remote.
1605 default) means to pull everything from the remote.
1589 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1606 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1590 default, all remote bookmarks are pulled.
1607 default, all remote bookmarks are pulled.
1591 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1608 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1592 initialization.
1609 initialization.
1593 ``streamclonerequested`` is a boolean indicating whether a "streaming
1610 ``streamclonerequested`` is a boolean indicating whether a "streaming
1594 clone" is requested. A "streaming clone" is essentially a raw file copy
1611 clone" is requested. A "streaming clone" is essentially a raw file copy
1595 of revlogs from the server. This only works when the local repository is
1612 of revlogs from the server. This only works when the local repository is
1596 empty. The default value of ``None`` means to respect the server
1613 empty. The default value of ``None`` means to respect the server
1597 configuration for preferring stream clones.
1614 configuration for preferring stream clones.
1598 ``includepats`` and ``excludepats`` define explicit file patterns to
1615 ``includepats`` and ``excludepats`` define explicit file patterns to
1599 include and exclude in storage, respectively. If not defined, narrow
1616 include and exclude in storage, respectively. If not defined, narrow
1600 patterns from the repo instance are used, if available.
1617 patterns from the repo instance are used, if available.
1601 ``depth`` is an integer indicating the DAG depth of history we're
1618 ``depth`` is an integer indicating the DAG depth of history we're
1602 interested in. If defined, for each revision specified in ``heads``, we
1619 interested in. If defined, for each revision specified in ``heads``, we
1603 will fetch up to this many of its ancestors and data associated with them.
1620 will fetch up to this many of its ancestors and data associated with them.
1604 ``confirm`` is a boolean indicating whether the pull should be confirmed
1621 ``confirm`` is a boolean indicating whether the pull should be confirmed
1605 before committing the transaction. This overrides HGPLAIN.
1622 before committing the transaction. This overrides HGPLAIN.
1606
1623
1607 Returns the ``pulloperation`` created for this pull.
1624 Returns the ``pulloperation`` created for this pull.
1608 """
1625 """
1609 if opargs is None:
1626 if opargs is None:
1610 opargs = {}
1627 opargs = {}
1611
1628
1612 # We allow the narrow patterns to be passed in explicitly to provide more
1629 # We allow the narrow patterns to be passed in explicitly to provide more
1613 # flexibility for API consumers.
1630 # flexibility for API consumers.
1614 if includepats or excludepats:
1631 if includepats or excludepats:
1615 includepats = includepats or set()
1632 includepats = includepats or set()
1616 excludepats = excludepats or set()
1633 excludepats = excludepats or set()
1617 else:
1634 else:
1618 includepats, excludepats = repo.narrowpats
1635 includepats, excludepats = repo.narrowpats
1619
1636
1620 narrowspec.validatepatterns(includepats)
1637 narrowspec.validatepatterns(includepats)
1621 narrowspec.validatepatterns(excludepats)
1638 narrowspec.validatepatterns(excludepats)
1622
1639
1623 pullop = pulloperation(
1640 pullop = pulloperation(
1624 repo,
1641 repo,
1625 remote,
1642 remote,
1626 path=path,
1643 path=path,
1627 heads=heads,
1644 heads=heads,
1628 force=force,
1645 force=force,
1629 bookmarks=bookmarks,
1646 bookmarks=bookmarks,
1630 streamclonerequested=streamclonerequested,
1647 streamclonerequested=streamclonerequested,
1631 includepats=includepats,
1648 includepats=includepats,
1632 excludepats=excludepats,
1649 excludepats=excludepats,
1633 depth=depth,
1650 depth=depth,
1634 **pycompat.strkwargs(opargs)
1651 **pycompat.strkwargs(opargs)
1635 )
1652 )
1636
1653
1637 peerlocal = pullop.remote.local()
1654 peerlocal = pullop.remote.local()
1638 if peerlocal:
1655 if peerlocal:
1639 missing = set(peerlocal.requirements) - pullop.repo.supported
1656 missing = set(peerlocal.requirements) - pullop.repo.supported
1640 if missing:
1657 if missing:
1641 msg = _(
1658 msg = _(
1642 b"required features are not"
1659 b"required features are not"
1643 b" supported in the destination:"
1660 b" supported in the destination:"
1644 b" %s"
1661 b" %s"
1645 ) % (b', '.join(sorted(missing)))
1662 ) % (b', '.join(sorted(missing)))
1646 raise error.Abort(msg)
1663 raise error.Abort(msg)
1647
1664
1648 for category in repo._wanted_sidedata:
1665 for category in repo._wanted_sidedata:
1649 # Check that a computer is registered for that category for at least
1666 # Check that a computer is registered for that category for at least
1650 # one revlog kind.
1667 # one revlog kind.
1651 for kind, computers in repo._sidedata_computers.items():
1668 for kind, computers in repo._sidedata_computers.items():
1652 if computers.get(category):
1669 if computers.get(category):
1653 break
1670 break
1654 else:
1671 else:
1655 # This should never happen since repos are supposed to be able to
1672 # This should never happen since repos are supposed to be able to
1656 # generate the sidedata they require.
1673 # generate the sidedata they require.
1657 raise error.ProgrammingError(
1674 raise error.ProgrammingError(
1658 _(
1675 _(
1659 b'sidedata category requested by local side without local'
1676 b'sidedata category requested by local side without local'
1660 b"support: '%s'"
1677 b"support: '%s'"
1661 )
1678 )
1662 % pycompat.bytestr(category)
1679 % pycompat.bytestr(category)
1663 )
1680 )
1664
1681
1665 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1682 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1666 wlock = util.nullcontextmanager()
1683 wlock = util.nullcontextmanager()
1667 if not bookmod.bookmarksinstore(repo):
1684 if not bookmod.bookmarksinstore(repo):
1668 wlock = repo.wlock()
1685 wlock = repo.wlock()
1669 with wlock, repo.lock(), pullop.trmanager:
1686 with wlock, repo.lock(), pullop.trmanager:
1670 if confirm or (
1687 if confirm or (
1671 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1688 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1672 ):
1689 ):
1673 add_confirm_callback(repo, pullop)
1690 add_confirm_callback(repo, pullop)
1674
1691
1675 # This should ideally be in _pullbundle2(). However, it needs to run
1692 # This should ideally be in _pullbundle2(). However, it needs to run
1676 # before discovery to avoid extra work.
1693 # before discovery to avoid extra work.
1677 _maybeapplyclonebundle(pullop)
1694 _maybeapplyclonebundle(pullop)
1678 streamclone.maybeperformlegacystreamclone(pullop)
1695 streamclone.maybeperformlegacystreamclone(pullop)
1679 _pulldiscovery(pullop)
1696 _pulldiscovery(pullop)
1680 if pullop.canusebundle2:
1697 if pullop.canusebundle2:
1681 _fullpullbundle2(repo, pullop)
1698 _fullpullbundle2(repo, pullop)
1682 _pullchangeset(pullop)
1699 _pullchangeset(pullop)
1683 _pullphase(pullop)
1700 _pullphase(pullop)
1684 _pullbookmarks(pullop)
1701 _pullbookmarks(pullop)
1685 _pullobsolete(pullop)
1702 _pullobsolete(pullop)
1686
1703
1687 # storing remotenames
1704 # storing remotenames
1688 if repo.ui.configbool(b'experimental', b'remotenames'):
1705 if repo.ui.configbool(b'experimental', b'remotenames'):
1689 logexchange.pullremotenames(repo, remote)
1706 logexchange.pullremotenames(repo, remote)
1690
1707
1691 return pullop
1708 return pullop
1692
1709
1693
1710
1694 # list of steps to perform discovery before pull
1711 # list of steps to perform discovery before pull
1695 pulldiscoveryorder = []
1712 pulldiscoveryorder = []
1696
1713
1697 # Mapping between step name and function
1714 # Mapping between step name and function
1698 #
1715 #
1699 # This exists to help extensions wrap steps if necessary
1716 # This exists to help extensions wrap steps if necessary
1700 pulldiscoverymapping = {}
1717 pulldiscoverymapping = {}
1701
1718
1702
1719
1703 def pulldiscovery(stepname):
1720 def pulldiscovery(stepname):
1704 """decorator for function performing discovery before pull
1721 """decorator for function performing discovery before pull
1705
1722
1706 The function is added to the step -> function mapping and appended to the
1723 The function is added to the step -> function mapping and appended to the
1707 list of steps. Beware that decorated function will be added in order (this
1724 list of steps. Beware that decorated function will be added in order (this
1708 may matter).
1725 may matter).
1709
1726
1710 You can only use this decorator for a new step, if you want to wrap a step
1727 You can only use this decorator for a new step, if you want to wrap a step
1711 from an extension, change the pulldiscovery dictionary directly."""
1728 from an extension, change the pulldiscovery dictionary directly."""
1712
1729
1713 def dec(func):
1730 def dec(func):
1714 assert stepname not in pulldiscoverymapping
1731 assert stepname not in pulldiscoverymapping
1715 pulldiscoverymapping[stepname] = func
1732 pulldiscoverymapping[stepname] = func
1716 pulldiscoveryorder.append(stepname)
1733 pulldiscoveryorder.append(stepname)
1717 return func
1734 return func
1718
1735
1719 return dec
1736 return dec
1720
1737
1721
1738
1722 def _pulldiscovery(pullop):
1739 def _pulldiscovery(pullop):
1723 """Run all discovery steps"""
1740 """Run all discovery steps"""
1724 for stepname in pulldiscoveryorder:
1741 for stepname in pulldiscoveryorder:
1725 step = pulldiscoverymapping[stepname]
1742 step = pulldiscoverymapping[stepname]
1726 step(pullop)
1743 step(pullop)
1727
1744
1728
1745
1729 @pulldiscovery(b'b1:bookmarks')
1746 @pulldiscovery(b'b1:bookmarks')
1730 def _pullbookmarkbundle1(pullop):
1747 def _pullbookmarkbundle1(pullop):
1731 """fetch bookmark data in bundle1 case
1748 """fetch bookmark data in bundle1 case
1732
1749
1733 If not using bundle2, we have to fetch bookmarks before changeset
1750 If not using bundle2, we have to fetch bookmarks before changeset
1734 discovery to reduce the chance and impact of race conditions."""
1751 discovery to reduce the chance and impact of race conditions."""
1735 if pullop.remotebookmarks is not None:
1752 if pullop.remotebookmarks is not None:
1736 return
1753 return
1737 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1754 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1738 # all known bundle2 servers now support listkeys, but lets be nice with
1755 # all known bundle2 servers now support listkeys, but lets be nice with
1739 # new implementation.
1756 # new implementation.
1740 return
1757 return
1741 books = listkeys(pullop.remote, b'bookmarks')
1758 books = listkeys(pullop.remote, b'bookmarks')
1742 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1759 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1743
1760
1744
1761
1745 @pulldiscovery(b'changegroup')
1762 @pulldiscovery(b'changegroup')
1746 def _pulldiscoverychangegroup(pullop):
1763 def _pulldiscoverychangegroup(pullop):
1747 """discovery phase for the pull
1764 """discovery phase for the pull
1748
1765
1749 Current handle changeset discovery only, will change handle all discovery
1766 Current handle changeset discovery only, will change handle all discovery
1750 at some point."""
1767 at some point."""
1751 tmp = discovery.findcommonincoming(
1768 tmp = discovery.findcommonincoming(
1752 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1769 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1753 )
1770 )
1754 common, fetch, rheads = tmp
1771 common, fetch, rheads = tmp
1755 has_node = pullop.repo.unfiltered().changelog.index.has_node
1772 has_node = pullop.repo.unfiltered().changelog.index.has_node
1756 if fetch and rheads:
1773 if fetch and rheads:
1757 # If a remote heads is filtered locally, put in back in common.
1774 # If a remote heads is filtered locally, put in back in common.
1758 #
1775 #
1759 # This is a hackish solution to catch most of "common but locally
1776 # This is a hackish solution to catch most of "common but locally
1760 # hidden situation". We do not performs discovery on unfiltered
1777 # hidden situation". We do not performs discovery on unfiltered
1761 # repository because it end up doing a pathological amount of round
1778 # repository because it end up doing a pathological amount of round
1762 # trip for w huge amount of changeset we do not care about.
1779 # trip for w huge amount of changeset we do not care about.
1763 #
1780 #
1764 # If a set of such "common but filtered" changeset exist on the server
1781 # If a set of such "common but filtered" changeset exist on the server
1765 # but are not including a remote heads, we'll not be able to detect it,
1782 # but are not including a remote heads, we'll not be able to detect it,
1766 scommon = set(common)
1783 scommon = set(common)
1767 for n in rheads:
1784 for n in rheads:
1768 if has_node(n):
1785 if has_node(n):
1769 if n not in scommon:
1786 if n not in scommon:
1770 common.append(n)
1787 common.append(n)
1771 if set(rheads).issubset(set(common)):
1788 if set(rheads).issubset(set(common)):
1772 fetch = []
1789 fetch = []
1773 pullop.common = common
1790 pullop.common = common
1774 pullop.fetch = fetch
1791 pullop.fetch = fetch
1775 pullop.rheads = rheads
1792 pullop.rheads = rheads
1776
1793
1777
1794
1778 def _pullbundle2(pullop):
1795 def _pullbundle2(pullop):
1779 """pull data using bundle2
1796 """pull data using bundle2
1780
1797
1781 For now, the only supported data are changegroup."""
1798 For now, the only supported data are changegroup."""
1782 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1799 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1783
1800
1784 # make ui easier to access
1801 # make ui easier to access
1785 ui = pullop.repo.ui
1802 ui = pullop.repo.ui
1786
1803
1787 # At the moment we don't do stream clones over bundle2. If that is
1804 # At the moment we don't do stream clones over bundle2. If that is
1788 # implemented then here's where the check for that will go.
1805 # implemented then here's where the check for that will go.
1789 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1806 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1790
1807
1791 # declare pull perimeters
1808 # declare pull perimeters
1792 kwargs[b'common'] = pullop.common
1809 kwargs[b'common'] = pullop.common
1793 kwargs[b'heads'] = pullop.heads or pullop.rheads
1810 kwargs[b'heads'] = pullop.heads or pullop.rheads
1794
1811
1795 # check server supports narrow and then adding includepats and excludepats
1812 # check server supports narrow and then adding includepats and excludepats
1796 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1813 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1797 if servernarrow and pullop.includepats:
1814 if servernarrow and pullop.includepats:
1798 kwargs[b'includepats'] = pullop.includepats
1815 kwargs[b'includepats'] = pullop.includepats
1799 if servernarrow and pullop.excludepats:
1816 if servernarrow and pullop.excludepats:
1800 kwargs[b'excludepats'] = pullop.excludepats
1817 kwargs[b'excludepats'] = pullop.excludepats
1801
1818
1802 if streaming:
1819 if streaming:
1803 kwargs[b'cg'] = False
1820 kwargs[b'cg'] = False
1804 kwargs[b'stream'] = True
1821 kwargs[b'stream'] = True
1805 pullop.stepsdone.add(b'changegroup')
1822 pullop.stepsdone.add(b'changegroup')
1806 pullop.stepsdone.add(b'phases')
1823 pullop.stepsdone.add(b'phases')
1807
1824
1808 else:
1825 else:
1809 # pulling changegroup
1826 # pulling changegroup
1810 pullop.stepsdone.add(b'changegroup')
1827 pullop.stepsdone.add(b'changegroup')
1811
1828
1812 kwargs[b'cg'] = pullop.fetch
1829 kwargs[b'cg'] = pullop.fetch
1813
1830
1814 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1831 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1815 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1832 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1816 if not legacyphase and hasbinaryphase:
1833 if not legacyphase and hasbinaryphase:
1817 kwargs[b'phases'] = True
1834 kwargs[b'phases'] = True
1818 pullop.stepsdone.add(b'phases')
1835 pullop.stepsdone.add(b'phases')
1819
1836
1820 if b'listkeys' in pullop.remotebundle2caps:
1837 if b'listkeys' in pullop.remotebundle2caps:
1821 if b'phases' not in pullop.stepsdone:
1838 if b'phases' not in pullop.stepsdone:
1822 kwargs[b'listkeys'] = [b'phases']
1839 kwargs[b'listkeys'] = [b'phases']
1823
1840
1824 bookmarksrequested = False
1841 bookmarksrequested = False
1825 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1842 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1826 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1843 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1827
1844
1828 if pullop.remotebookmarks is not None:
1845 if pullop.remotebookmarks is not None:
1829 pullop.stepsdone.add(b'request-bookmarks')
1846 pullop.stepsdone.add(b'request-bookmarks')
1830
1847
1831 if (
1848 if (
1832 b'request-bookmarks' not in pullop.stepsdone
1849 b'request-bookmarks' not in pullop.stepsdone
1833 and pullop.remotebookmarks is None
1850 and pullop.remotebookmarks is None
1834 and not legacybookmark
1851 and not legacybookmark
1835 and hasbinarybook
1852 and hasbinarybook
1836 ):
1853 ):
1837 kwargs[b'bookmarks'] = True
1854 kwargs[b'bookmarks'] = True
1838 bookmarksrequested = True
1855 bookmarksrequested = True
1839
1856
1840 if b'listkeys' in pullop.remotebundle2caps:
1857 if b'listkeys' in pullop.remotebundle2caps:
1841 if b'request-bookmarks' not in pullop.stepsdone:
1858 if b'request-bookmarks' not in pullop.stepsdone:
1842 # make sure to always includes bookmark data when migrating
1859 # make sure to always includes bookmark data when migrating
1843 # `hg incoming --bundle` to using this function.
1860 # `hg incoming --bundle` to using this function.
1844 pullop.stepsdone.add(b'request-bookmarks')
1861 pullop.stepsdone.add(b'request-bookmarks')
1845 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1862 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1846
1863
1847 # If this is a full pull / clone and the server supports the clone bundles
1864 # If this is a full pull / clone and the server supports the clone bundles
1848 # feature, tell the server whether we attempted a clone bundle. The
1865 # feature, tell the server whether we attempted a clone bundle. The
1849 # presence of this flag indicates the client supports clone bundles. This
1866 # presence of this flag indicates the client supports clone bundles. This
1850 # will enable the server to treat clients that support clone bundles
1867 # will enable the server to treat clients that support clone bundles
1851 # differently from those that don't.
1868 # differently from those that don't.
1852 if (
1869 if (
1853 pullop.remote.capable(b'clonebundles')
1870 pullop.remote.capable(b'clonebundles')
1854 and pullop.heads is None
1871 and pullop.heads is None
1855 and list(pullop.common) == [pullop.repo.nullid]
1872 and list(pullop.common) == [pullop.repo.nullid]
1856 ):
1873 ):
1857 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1874 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1858
1875
1859 if streaming:
1876 if streaming:
1860 pullop.repo.ui.status(_(b'streaming all changes\n'))
1877 pullop.repo.ui.status(_(b'streaming all changes\n'))
1861 elif not pullop.fetch:
1878 elif not pullop.fetch:
1862 pullop.repo.ui.status(_(b"no changes found\n"))
1879 pullop.repo.ui.status(_(b"no changes found\n"))
1863 pullop.cgresult = 0
1880 pullop.cgresult = 0
1864 else:
1881 else:
1865 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1882 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1866 pullop.repo.ui.status(_(b"requesting all changes\n"))
1883 pullop.repo.ui.status(_(b"requesting all changes\n"))
1867 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1884 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1868 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1885 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1869 if obsolete.commonversion(remoteversions) is not None:
1886 if obsolete.commonversion(remoteversions) is not None:
1870 kwargs[b'obsmarkers'] = True
1887 kwargs[b'obsmarkers'] = True
1871 pullop.stepsdone.add(b'obsmarkers')
1888 pullop.stepsdone.add(b'obsmarkers')
1872 _pullbundle2extraprepare(pullop, kwargs)
1889 _pullbundle2extraprepare(pullop, kwargs)
1873
1890
1874 remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
1891 remote_sidedata = bundle2.read_remote_wanted_sidedata(pullop.remote)
1875 if remote_sidedata:
1892 if remote_sidedata:
1876 kwargs[b'remote_sidedata'] = remote_sidedata
1893 kwargs[b'remote_sidedata'] = remote_sidedata
1877
1894
1878 with pullop.remote.commandexecutor() as e:
1895 with pullop.remote.commandexecutor() as e:
1879 args = dict(kwargs)
1896 args = dict(kwargs)
1880 args[b'source'] = b'pull'
1897 args[b'source'] = b'pull'
1881 bundle = e.callcommand(b'getbundle', args).result()
1898 bundle = e.callcommand(b'getbundle', args).result()
1882
1899
1883 try:
1900 try:
1884 op = bundle2.bundleoperation(
1901 op = bundle2.bundleoperation(
1885 pullop.repo, pullop.gettransaction, source=b'pull'
1902 pullop.repo, pullop.gettransaction, source=b'pull'
1886 )
1903 )
1887 op.modes[b'bookmarks'] = b'records'
1904 op.modes[b'bookmarks'] = b'records'
1888 bundle2.processbundle(pullop.repo, bundle, op=op)
1905 bundle2.processbundle(pullop.repo, bundle, op=op)
1889 except bundle2.AbortFromPart as exc:
1906 except bundle2.AbortFromPart as exc:
1890 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1907 pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
1891 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
1908 raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
1892 except error.BundleValueError as exc:
1909 except error.BundleValueError as exc:
1893 raise error.RemoteError(_(b'missing support for %s') % exc)
1910 raise error.RemoteError(_(b'missing support for %s') % exc)
1894
1911
1895 if pullop.fetch:
1912 if pullop.fetch:
1896 pullop.cgresult = bundle2.combinechangegroupresults(op)
1913 pullop.cgresult = bundle2.combinechangegroupresults(op)
1897
1914
1898 # processing phases change
1915 # processing phases change
1899 for namespace, value in op.records[b'listkeys']:
1916 for namespace, value in op.records[b'listkeys']:
1900 if namespace == b'phases':
1917 if namespace == b'phases':
1901 _pullapplyphases(pullop, value)
1918 _pullapplyphases(pullop, value)
1902
1919
1903 # processing bookmark update
1920 # processing bookmark update
1904 if bookmarksrequested:
1921 if bookmarksrequested:
1905 books = {}
1922 books = {}
1906 for record in op.records[b'bookmarks']:
1923 for record in op.records[b'bookmarks']:
1907 books[record[b'bookmark']] = record[b"node"]
1924 books[record[b'bookmark']] = record[b"node"]
1908 pullop.remotebookmarks = books
1925 pullop.remotebookmarks = books
1909 else:
1926 else:
1910 for namespace, value in op.records[b'listkeys']:
1927 for namespace, value in op.records[b'listkeys']:
1911 if namespace == b'bookmarks':
1928 if namespace == b'bookmarks':
1912 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1929 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1913
1930
1914 # bookmark data were either already there or pulled in the bundle
1931 # bookmark data were either already there or pulled in the bundle
1915 if pullop.remotebookmarks is not None:
1932 if pullop.remotebookmarks is not None:
1916 _pullbookmarks(pullop)
1933 _pullbookmarks(pullop)
1917
1934
1918
1935
1919 def _pullbundle2extraprepare(pullop, kwargs):
1936 def _pullbundle2extraprepare(pullop, kwargs):
1920 """hook function so that extensions can extend the getbundle call"""
1937 """hook function so that extensions can extend the getbundle call"""
1921
1938
1922
1939
1923 def _pullchangeset(pullop):
1940 def _pullchangeset(pullop):
1924 """pull changeset from unbundle into the local repo"""
1941 """pull changeset from unbundle into the local repo"""
1925 # We delay the open of the transaction as late as possible so we
1942 # We delay the open of the transaction as late as possible so we
1926 # don't open transaction for nothing or you break future useful
1943 # don't open transaction for nothing or you break future useful
1927 # rollback call
1944 # rollback call
1928 if b'changegroup' in pullop.stepsdone:
1945 if b'changegroup' in pullop.stepsdone:
1929 return
1946 return
1930 pullop.stepsdone.add(b'changegroup')
1947 pullop.stepsdone.add(b'changegroup')
1931 if not pullop.fetch:
1948 if not pullop.fetch:
1932 pullop.repo.ui.status(_(b"no changes found\n"))
1949 pullop.repo.ui.status(_(b"no changes found\n"))
1933 pullop.cgresult = 0
1950 pullop.cgresult = 0
1934 return
1951 return
1935 tr = pullop.gettransaction()
1952 tr = pullop.gettransaction()
1936 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1953 if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
1937 pullop.repo.ui.status(_(b"requesting all changes\n"))
1954 pullop.repo.ui.status(_(b"requesting all changes\n"))
1938 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1955 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
1939 # issue1320, avoid a race if remote changed after discovery
1956 # issue1320, avoid a race if remote changed after discovery
1940 pullop.heads = pullop.rheads
1957 pullop.heads = pullop.rheads
1941
1958
1942 if pullop.remote.capable(b'getbundle'):
1959 if pullop.remote.capable(b'getbundle'):
1943 # TODO: get bundlecaps from remote
1960 # TODO: get bundlecaps from remote
1944 cg = pullop.remote.getbundle(
1961 cg = pullop.remote.getbundle(
1945 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
1962 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
1946 )
1963 )
1947 elif pullop.heads is None:
1964 elif pullop.heads is None:
1948 with pullop.remote.commandexecutor() as e:
1965 with pullop.remote.commandexecutor() as e:
1949 cg = e.callcommand(
1966 cg = e.callcommand(
1950 b'changegroup',
1967 b'changegroup',
1951 {
1968 {
1952 b'nodes': pullop.fetch,
1969 b'nodes': pullop.fetch,
1953 b'source': b'pull',
1970 b'source': b'pull',
1954 },
1971 },
1955 ).result()
1972 ).result()
1956
1973
1957 elif not pullop.remote.capable(b'changegroupsubset'):
1974 elif not pullop.remote.capable(b'changegroupsubset'):
1958 raise error.Abort(
1975 raise error.Abort(
1959 _(
1976 _(
1960 b"partial pull cannot be done because "
1977 b"partial pull cannot be done because "
1961 b"other repository doesn't support "
1978 b"other repository doesn't support "
1962 b"changegroupsubset."
1979 b"changegroupsubset."
1963 )
1980 )
1964 )
1981 )
1965 else:
1982 else:
1966 with pullop.remote.commandexecutor() as e:
1983 with pullop.remote.commandexecutor() as e:
1967 cg = e.callcommand(
1984 cg = e.callcommand(
1968 b'changegroupsubset',
1985 b'changegroupsubset',
1969 {
1986 {
1970 b'bases': pullop.fetch,
1987 b'bases': pullop.fetch,
1971 b'heads': pullop.heads,
1988 b'heads': pullop.heads,
1972 b'source': b'pull',
1989 b'source': b'pull',
1973 },
1990 },
1974 ).result()
1991 ).result()
1975
1992
1976 bundleop = bundle2.applybundle(
1993 bundleop = bundle2.applybundle(
1977 pullop.repo, cg, tr, b'pull', pullop.remote.url()
1994 pullop.repo, cg, tr, b'pull', pullop.remote.url()
1978 )
1995 )
1979 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1996 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1980
1997
1981
1998
1982 def _pullphase(pullop):
1999 def _pullphase(pullop):
1983 # Get remote phases data from remote
2000 # Get remote phases data from remote
1984 if b'phases' in pullop.stepsdone:
2001 if b'phases' in pullop.stepsdone:
1985 return
2002 return
1986 remotephases = listkeys(pullop.remote, b'phases')
2003 remotephases = listkeys(pullop.remote, b'phases')
1987 _pullapplyphases(pullop, remotephases)
2004 _pullapplyphases(pullop, remotephases)
1988
2005
1989
2006
1990 def _pullapplyphases(pullop, remotephases):
2007 def _pullapplyphases(pullop, remotephases):
1991 """apply phase movement from observed remote state"""
2008 """apply phase movement from observed remote state"""
1992 if b'phases' in pullop.stepsdone:
2009 if b'phases' in pullop.stepsdone:
1993 return
2010 return
1994 pullop.stepsdone.add(b'phases')
2011 pullop.stepsdone.add(b'phases')
1995 publishing = bool(remotephases.get(b'publishing', False))
2012 publishing = bool(remotephases.get(b'publishing', False))
1996 if remotephases and not publishing:
2013 if remotephases and not publishing:
1997 # remote is new and non-publishing
2014 # remote is new and non-publishing
1998 pheads, _dr = phases.analyzeremotephases(
2015 pheads, _dr = phases.analyzeremotephases(
1999 pullop.repo, pullop.pulledsubset, remotephases
2016 pullop.repo, pullop.pulledsubset, remotephases
2000 )
2017 )
2001 dheads = pullop.pulledsubset
2018 dheads = pullop.pulledsubset
2002 else:
2019 else:
2003 # Remote is old or publishing all common changesets
2020 # Remote is old or publishing all common changesets
2004 # should be seen as public
2021 # should be seen as public
2005 pheads = pullop.pulledsubset
2022 pheads = pullop.pulledsubset
2006 dheads = []
2023 dheads = []
2007 unfi = pullop.repo.unfiltered()
2024 unfi = pullop.repo.unfiltered()
2008 phase = unfi._phasecache.phase
2025 phase = unfi._phasecache.phase
2009 rev = unfi.changelog.index.get_rev
2026 rev = unfi.changelog.index.get_rev
2010 public = phases.public
2027 public = phases.public
2011 draft = phases.draft
2028 draft = phases.draft
2012
2029
2013 # exclude changesets already public locally and update the others
2030 # exclude changesets already public locally and update the others
2014 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2031 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2015 if pheads:
2032 if pheads:
2016 tr = pullop.gettransaction()
2033 tr = pullop.gettransaction()
2017 phases.advanceboundary(pullop.repo, tr, public, pheads)
2034 phases.advanceboundary(pullop.repo, tr, public, pheads)
2018
2035
2019 # exclude changesets already draft locally and update the others
2036 # exclude changesets already draft locally and update the others
2020 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2037 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2021 if dheads:
2038 if dheads:
2022 tr = pullop.gettransaction()
2039 tr = pullop.gettransaction()
2023 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2040 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2024
2041
2025
2042
2026 def _pullbookmarks(pullop):
2043 def _pullbookmarks(pullop):
2027 """process the remote bookmark information to update the local one"""
2044 """process the remote bookmark information to update the local one"""
2028 if b'bookmarks' in pullop.stepsdone:
2045 if b'bookmarks' in pullop.stepsdone:
2029 return
2046 return
2030 pullop.stepsdone.add(b'bookmarks')
2047 pullop.stepsdone.add(b'bookmarks')
2031 repo = pullop.repo
2048 repo = pullop.repo
2032 remotebookmarks = pullop.remotebookmarks
2049 remotebookmarks = pullop.remotebookmarks
2033 bookmarks_mode = None
2050 bookmarks_mode = None
2034 if pullop.remote_path is not None:
2051 if pullop.remote_path is not None:
2035 bookmarks_mode = pullop.remote_path.bookmarks_mode
2052 bookmarks_mode = pullop.remote_path.bookmarks_mode
2036 bookmod.updatefromremote(
2053 bookmod.updatefromremote(
2037 repo.ui,
2054 repo.ui,
2038 repo,
2055 repo,
2039 remotebookmarks,
2056 remotebookmarks,
2040 pullop.remote.url(),
2057 pullop.remote.url(),
2041 pullop.gettransaction,
2058 pullop.gettransaction,
2042 explicit=pullop.explicitbookmarks,
2059 explicit=pullop.explicitbookmarks,
2043 mode=bookmarks_mode,
2060 mode=bookmarks_mode,
2044 )
2061 )
2045
2062
2046
2063
2047 def _pullobsolete(pullop):
2064 def _pullobsolete(pullop):
2048 """utility function to pull obsolete markers from a remote
2065 """utility function to pull obsolete markers from a remote
2049
2066
2050 The `gettransaction` is function that return the pull transaction, creating
2067 The `gettransaction` is function that return the pull transaction, creating
2051 one if necessary. We return the transaction to inform the calling code that
2068 one if necessary. We return the transaction to inform the calling code that
2052 a new transaction have been created (when applicable).
2069 a new transaction have been created (when applicable).
2053
2070
2054 Exists mostly to allow overriding for experimentation purpose"""
2071 Exists mostly to allow overriding for experimentation purpose"""
2055 if b'obsmarkers' in pullop.stepsdone:
2072 if b'obsmarkers' in pullop.stepsdone:
2056 return
2073 return
2057 pullop.stepsdone.add(b'obsmarkers')
2074 pullop.stepsdone.add(b'obsmarkers')
2058 tr = None
2075 tr = None
2059 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2076 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2060 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2077 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2061 remoteobs = listkeys(pullop.remote, b'obsolete')
2078 remoteobs = listkeys(pullop.remote, b'obsolete')
2062 if b'dump0' in remoteobs:
2079 if b'dump0' in remoteobs:
2063 tr = pullop.gettransaction()
2080 tr = pullop.gettransaction()
2064 markers = []
2081 markers = []
2065 for key in sorted(remoteobs, reverse=True):
2082 for key in sorted(remoteobs, reverse=True):
2066 if key.startswith(b'dump'):
2083 if key.startswith(b'dump'):
2067 data = util.b85decode(remoteobs[key])
2084 data = util.b85decode(remoteobs[key])
2068 version, newmarks = obsolete._readmarkers(data)
2085 version, newmarks = obsolete._readmarkers(data)
2069 markers += newmarks
2086 markers += newmarks
2070 if markers:
2087 if markers:
2071 pullop.repo.obsstore.add(tr, markers)
2088 pullop.repo.obsstore.add(tr, markers)
2072 pullop.repo.invalidatevolatilesets()
2089 pullop.repo.invalidatevolatilesets()
2073 return tr
2090 return tr
2074
2091
2075
2092
2076 def applynarrowacl(repo, kwargs):
2093 def applynarrowacl(repo, kwargs):
2077 """Apply narrow fetch access control.
2094 """Apply narrow fetch access control.
2078
2095
2079 This massages the named arguments for getbundle wire protocol commands
2096 This massages the named arguments for getbundle wire protocol commands
2080 so requested data is filtered through access control rules.
2097 so requested data is filtered through access control rules.
2081 """
2098 """
2082 ui = repo.ui
2099 ui = repo.ui
2083 # TODO this assumes existence of HTTP and is a layering violation.
2100 # TODO this assumes existence of HTTP and is a layering violation.
2084 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2101 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2085 user_includes = ui.configlist(
2102 user_includes = ui.configlist(
2086 _NARROWACL_SECTION,
2103 _NARROWACL_SECTION,
2087 username + b'.includes',
2104 username + b'.includes',
2088 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2105 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2089 )
2106 )
2090 user_excludes = ui.configlist(
2107 user_excludes = ui.configlist(
2091 _NARROWACL_SECTION,
2108 _NARROWACL_SECTION,
2092 username + b'.excludes',
2109 username + b'.excludes',
2093 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2110 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2094 )
2111 )
2095 if not user_includes:
2112 if not user_includes:
2096 raise error.Abort(
2113 raise error.Abort(
2097 _(b"%s configuration for user %s is empty")
2114 _(b"%s configuration for user %s is empty")
2098 % (_NARROWACL_SECTION, username)
2115 % (_NARROWACL_SECTION, username)
2099 )
2116 )
2100
2117
2101 user_includes = [
2118 user_includes = [
2102 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2119 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2103 ]
2120 ]
2104 user_excludes = [
2121 user_excludes = [
2105 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2122 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2106 ]
2123 ]
2107
2124
2108 req_includes = set(kwargs.get('includepats', []))
2125 req_includes = set(kwargs.get('includepats', []))
2109 req_excludes = set(kwargs.get('excludepats', []))
2126 req_excludes = set(kwargs.get('excludepats', []))
2110
2127
2111 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2128 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2112 req_includes, req_excludes, user_includes, user_excludes
2129 req_includes, req_excludes, user_includes, user_excludes
2113 )
2130 )
2114
2131
2115 if invalid_includes:
2132 if invalid_includes:
2116 raise error.Abort(
2133 raise error.Abort(
2117 _(b"The following includes are not accessible for %s: %s")
2134 _(b"The following includes are not accessible for %s: %s")
2118 % (username, stringutil.pprint(invalid_includes))
2135 % (username, stringutil.pprint(invalid_includes))
2119 )
2136 )
2120
2137
2121 new_args = {}
2138 new_args = {}
2122 new_args.update(kwargs)
2139 new_args.update(kwargs)
2123 new_args['narrow'] = True
2140 new_args['narrow'] = True
2124 new_args['narrow_acl'] = True
2141 new_args['narrow_acl'] = True
2125 new_args['includepats'] = req_includes
2142 new_args['includepats'] = req_includes
2126 if req_excludes:
2143 if req_excludes:
2127 new_args['excludepats'] = req_excludes
2144 new_args['excludepats'] = req_excludes
2128
2145
2129 return new_args
2146 return new_args
2130
2147
2131
2148
2132 def _computeellipsis(repo, common, heads, known, match, depth=None):
2149 def _computeellipsis(repo, common, heads, known, match, depth=None):
2133 """Compute the shape of a narrowed DAG.
2150 """Compute the shape of a narrowed DAG.
2134
2151
2135 Args:
2152 Args:
2136 repo: The repository we're transferring.
2153 repo: The repository we're transferring.
2137 common: The roots of the DAG range we're transferring.
2154 common: The roots of the DAG range we're transferring.
2138 May be just [nullid], which means all ancestors of heads.
2155 May be just [nullid], which means all ancestors of heads.
2139 heads: The heads of the DAG range we're transferring.
2156 heads: The heads of the DAG range we're transferring.
2140 match: The narrowmatcher that allows us to identify relevant changes.
2157 match: The narrowmatcher that allows us to identify relevant changes.
2141 depth: If not None, only consider nodes to be full nodes if they are at
2158 depth: If not None, only consider nodes to be full nodes if they are at
2142 most depth changesets away from one of heads.
2159 most depth changesets away from one of heads.
2143
2160
2144 Returns:
2161 Returns:
2145 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2162 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2146
2163
2147 visitnodes: The list of nodes (either full or ellipsis) which
2164 visitnodes: The list of nodes (either full or ellipsis) which
2148 need to be sent to the client.
2165 need to be sent to the client.
2149 relevant_nodes: The set of changelog nodes which change a file inside
2166 relevant_nodes: The set of changelog nodes which change a file inside
2150 the narrowspec. The client needs these as non-ellipsis nodes.
2167 the narrowspec. The client needs these as non-ellipsis nodes.
2151 ellipsisroots: A dict of {rev: parents} that is used in
2168 ellipsisroots: A dict of {rev: parents} that is used in
2152 narrowchangegroup to produce ellipsis nodes with the
2169 narrowchangegroup to produce ellipsis nodes with the
2153 correct parents.
2170 correct parents.
2154 """
2171 """
2155 cl = repo.changelog
2172 cl = repo.changelog
2156 mfl = repo.manifestlog
2173 mfl = repo.manifestlog
2157
2174
2158 clrev = cl.rev
2175 clrev = cl.rev
2159
2176
2160 commonrevs = {clrev(n) for n in common} | {nullrev}
2177 commonrevs = {clrev(n) for n in common} | {nullrev}
2161 headsrevs = {clrev(n) for n in heads}
2178 headsrevs = {clrev(n) for n in heads}
2162
2179
2163 if depth:
2180 if depth:
2164 revdepth = {h: 0 for h in headsrevs}
2181 revdepth = {h: 0 for h in headsrevs}
2165
2182
2166 ellipsisheads = collections.defaultdict(set)
2183 ellipsisheads = collections.defaultdict(set)
2167 ellipsisroots = collections.defaultdict(set)
2184 ellipsisroots = collections.defaultdict(set)
2168
2185
2169 def addroot(head, curchange):
2186 def addroot(head, curchange):
2170 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2187 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2171 ellipsisroots[head].add(curchange)
2188 ellipsisroots[head].add(curchange)
2172 # Recursively split ellipsis heads with 3 roots by finding the
2189 # Recursively split ellipsis heads with 3 roots by finding the
2173 # roots' youngest common descendant which is an elided merge commit.
2190 # roots' youngest common descendant which is an elided merge commit.
2174 # That descendant takes 2 of the 3 roots as its own, and becomes a
2191 # That descendant takes 2 of the 3 roots as its own, and becomes a
2175 # root of the head.
2192 # root of the head.
2176 while len(ellipsisroots[head]) > 2:
2193 while len(ellipsisroots[head]) > 2:
2177 child, roots = splithead(head)
2194 child, roots = splithead(head)
2178 splitroots(head, child, roots)
2195 splitroots(head, child, roots)
2179 head = child # Recurse in case we just added a 3rd root
2196 head = child # Recurse in case we just added a 3rd root
2180
2197
2181 def splitroots(head, child, roots):
2198 def splitroots(head, child, roots):
2182 ellipsisroots[head].difference_update(roots)
2199 ellipsisroots[head].difference_update(roots)
2183 ellipsisroots[head].add(child)
2200 ellipsisroots[head].add(child)
2184 ellipsisroots[child].update(roots)
2201 ellipsisroots[child].update(roots)
2185 ellipsisroots[child].discard(child)
2202 ellipsisroots[child].discard(child)
2186
2203
2187 def splithead(head):
2204 def splithead(head):
2188 r1, r2, r3 = sorted(ellipsisroots[head])
2205 r1, r2, r3 = sorted(ellipsisroots[head])
2189 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2206 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2190 mid = repo.revs(
2207 mid = repo.revs(
2191 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2208 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2192 )
2209 )
2193 for j in mid:
2210 for j in mid:
2194 if j == nr2:
2211 if j == nr2:
2195 return nr2, (nr1, nr2)
2212 return nr2, (nr1, nr2)
2196 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2213 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2197 return j, (nr1, nr2)
2214 return j, (nr1, nr2)
2198 raise error.Abort(
2215 raise error.Abort(
2199 _(
2216 _(
2200 b'Failed to split up ellipsis node! head: %d, '
2217 b'Failed to split up ellipsis node! head: %d, '
2201 b'roots: %d %d %d'
2218 b'roots: %d %d %d'
2202 )
2219 )
2203 % (head, r1, r2, r3)
2220 % (head, r1, r2, r3)
2204 )
2221 )
2205
2222
2206 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2223 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2207 visit = reversed(missing)
2224 visit = reversed(missing)
2208 relevant_nodes = set()
2225 relevant_nodes = set()
2209 visitnodes = [cl.node(m) for m in missing]
2226 visitnodes = [cl.node(m) for m in missing]
2210 required = set(headsrevs) | known
2227 required = set(headsrevs) | known
2211 for rev in visit:
2228 for rev in visit:
2212 clrev = cl.changelogrevision(rev)
2229 clrev = cl.changelogrevision(rev)
2213 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2230 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2214 if depth is not None:
2231 if depth is not None:
2215 curdepth = revdepth[rev]
2232 curdepth = revdepth[rev]
2216 for p in ps:
2233 for p in ps:
2217 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2234 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2218 needed = False
2235 needed = False
2219 shallow_enough = depth is None or revdepth[rev] <= depth
2236 shallow_enough = depth is None or revdepth[rev] <= depth
2220 if shallow_enough:
2237 if shallow_enough:
2221 curmf = mfl[clrev.manifest].read()
2238 curmf = mfl[clrev.manifest].read()
2222 if ps:
2239 if ps:
2223 # We choose to not trust the changed files list in
2240 # We choose to not trust the changed files list in
2224 # changesets because it's not always correct. TODO: could
2241 # changesets because it's not always correct. TODO: could
2225 # we trust it for the non-merge case?
2242 # we trust it for the non-merge case?
2226 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2243 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2227 needed = bool(curmf.diff(p1mf, match))
2244 needed = bool(curmf.diff(p1mf, match))
2228 if not needed and len(ps) > 1:
2245 if not needed and len(ps) > 1:
2229 # For merge changes, the list of changed files is not
2246 # For merge changes, the list of changed files is not
2230 # helpful, since we need to emit the merge if a file
2247 # helpful, since we need to emit the merge if a file
2231 # in the narrow spec has changed on either side of the
2248 # in the narrow spec has changed on either side of the
2232 # merge. As a result, we do a manifest diff to check.
2249 # merge. As a result, we do a manifest diff to check.
2233 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2250 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2234 needed = bool(curmf.diff(p2mf, match))
2251 needed = bool(curmf.diff(p2mf, match))
2235 else:
2252 else:
2236 # For a root node, we need to include the node if any
2253 # For a root node, we need to include the node if any
2237 # files in the node match the narrowspec.
2254 # files in the node match the narrowspec.
2238 needed = any(curmf.walk(match))
2255 needed = any(curmf.walk(match))
2239
2256
2240 if needed:
2257 if needed:
2241 for head in ellipsisheads[rev]:
2258 for head in ellipsisheads[rev]:
2242 addroot(head, rev)
2259 addroot(head, rev)
2243 for p in ps:
2260 for p in ps:
2244 required.add(p)
2261 required.add(p)
2245 relevant_nodes.add(cl.node(rev))
2262 relevant_nodes.add(cl.node(rev))
2246 else:
2263 else:
2247 if not ps:
2264 if not ps:
2248 ps = [nullrev]
2265 ps = [nullrev]
2249 if rev in required:
2266 if rev in required:
2250 for head in ellipsisheads[rev]:
2267 for head in ellipsisheads[rev]:
2251 addroot(head, rev)
2268 addroot(head, rev)
2252 for p in ps:
2269 for p in ps:
2253 ellipsisheads[p].add(rev)
2270 ellipsisheads[p].add(rev)
2254 else:
2271 else:
2255 for p in ps:
2272 for p in ps:
2256 ellipsisheads[p] |= ellipsisheads[rev]
2273 ellipsisheads[p] |= ellipsisheads[rev]
2257
2274
2258 # add common changesets as roots of their reachable ellipsis heads
2275 # add common changesets as roots of their reachable ellipsis heads
2259 for c in commonrevs:
2276 for c in commonrevs:
2260 for head in ellipsisheads[c]:
2277 for head in ellipsisheads[c]:
2261 addroot(head, c)
2278 addroot(head, c)
2262 return visitnodes, relevant_nodes, ellipsisroots
2279 return visitnodes, relevant_nodes, ellipsisroots
2263
2280
2264
2281
2265 def caps20to10(repo, role):
2282 def caps20to10(repo, role):
2266 """return a set with appropriate options to use bundle20 during getbundle"""
2283 """return a set with appropriate options to use bundle20 during getbundle"""
2267 caps = {b'HG20'}
2284 caps = {b'HG20'}
2268 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2285 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2269 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2286 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2270 return caps
2287 return caps
2271
2288
2272
2289
2273 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2290 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2274 getbundle2partsorder = []
2291 getbundle2partsorder = []
2275
2292
2276 # Mapping between step name and function
2293 # Mapping between step name and function
2277 #
2294 #
2278 # This exists to help extensions wrap steps if necessary
2295 # This exists to help extensions wrap steps if necessary
2279 getbundle2partsmapping = {}
2296 getbundle2partsmapping = {}
2280
2297
2281
2298
2282 def getbundle2partsgenerator(stepname, idx=None):
2299 def getbundle2partsgenerator(stepname, idx=None):
2283 """decorator for function generating bundle2 part for getbundle
2300 """decorator for function generating bundle2 part for getbundle
2284
2301
2285 The function is added to the step -> function mapping and appended to the
2302 The function is added to the step -> function mapping and appended to the
2286 list of steps. Beware that decorated functions will be added in order
2303 list of steps. Beware that decorated functions will be added in order
2287 (this may matter).
2304 (this may matter).
2288
2305
2289 You can only use this decorator for new steps, if you want to wrap a step
2306 You can only use this decorator for new steps, if you want to wrap a step
2290 from an extension, attack the getbundle2partsmapping dictionary directly."""
2307 from an extension, attack the getbundle2partsmapping dictionary directly."""
2291
2308
2292 def dec(func):
2309 def dec(func):
2293 assert stepname not in getbundle2partsmapping
2310 assert stepname not in getbundle2partsmapping
2294 getbundle2partsmapping[stepname] = func
2311 getbundle2partsmapping[stepname] = func
2295 if idx is None:
2312 if idx is None:
2296 getbundle2partsorder.append(stepname)
2313 getbundle2partsorder.append(stepname)
2297 else:
2314 else:
2298 getbundle2partsorder.insert(idx, stepname)
2315 getbundle2partsorder.insert(idx, stepname)
2299 return func
2316 return func
2300
2317
2301 return dec
2318 return dec
2302
2319
2303
2320
2304 def bundle2requested(bundlecaps):
2321 def bundle2requested(bundlecaps):
2305 if bundlecaps is not None:
2322 if bundlecaps is not None:
2306 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2323 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2307 return False
2324 return False
2308
2325
2309
2326
2310 def getbundlechunks(
2327 def getbundlechunks(
2311 repo,
2328 repo,
2312 source,
2329 source,
2313 heads=None,
2330 heads=None,
2314 common=None,
2331 common=None,
2315 bundlecaps=None,
2332 bundlecaps=None,
2316 remote_sidedata=None,
2333 remote_sidedata=None,
2317 **kwargs
2334 **kwargs
2318 ):
2335 ):
2319 """Return chunks constituting a bundle's raw data.
2336 """Return chunks constituting a bundle's raw data.
2320
2337
2321 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2338 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2322 passed.
2339 passed.
2323
2340
2324 Returns a 2-tuple of a dict with metadata about the generated bundle
2341 Returns a 2-tuple of a dict with metadata about the generated bundle
2325 and an iterator over raw chunks (of varying sizes).
2342 and an iterator over raw chunks (of varying sizes).
2326 """
2343 """
2327 kwargs = pycompat.byteskwargs(kwargs)
2344 kwargs = pycompat.byteskwargs(kwargs)
2328 info = {}
2345 info = {}
2329 usebundle2 = bundle2requested(bundlecaps)
2346 usebundle2 = bundle2requested(bundlecaps)
2330 # bundle10 case
2347 # bundle10 case
2331 if not usebundle2:
2348 if not usebundle2:
2332 if bundlecaps and not kwargs.get(b'cg', True):
2349 if bundlecaps and not kwargs.get(b'cg', True):
2333 raise ValueError(
2350 raise ValueError(
2334 _(b'request for bundle10 must include changegroup')
2351 _(b'request for bundle10 must include changegroup')
2335 )
2352 )
2336
2353
2337 if kwargs:
2354 if kwargs:
2338 raise ValueError(
2355 raise ValueError(
2339 _(b'unsupported getbundle arguments: %s')
2356 _(b'unsupported getbundle arguments: %s')
2340 % b', '.join(sorted(kwargs.keys()))
2357 % b', '.join(sorted(kwargs.keys()))
2341 )
2358 )
2342 outgoing = _computeoutgoing(repo, heads, common)
2359 outgoing = _computeoutgoing(repo, heads, common)
2343 info[b'bundleversion'] = 1
2360 info[b'bundleversion'] = 1
2344 return (
2361 return (
2345 info,
2362 info,
2346 changegroup.makestream(
2363 changegroup.makestream(
2347 repo,
2364 repo,
2348 outgoing,
2365 outgoing,
2349 b'01',
2366 b'01',
2350 source,
2367 source,
2351 bundlecaps=bundlecaps,
2368 bundlecaps=bundlecaps,
2352 remote_sidedata=remote_sidedata,
2369 remote_sidedata=remote_sidedata,
2353 ),
2370 ),
2354 )
2371 )
2355
2372
2356 # bundle20 case
2373 # bundle20 case
2357 info[b'bundleversion'] = 2
2374 info[b'bundleversion'] = 2
2358 b2caps = {}
2375 b2caps = {}
2359 for bcaps in bundlecaps:
2376 for bcaps in bundlecaps:
2360 if bcaps.startswith(b'bundle2='):
2377 if bcaps.startswith(b'bundle2='):
2361 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2378 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2362 b2caps.update(bundle2.decodecaps(blob))
2379 b2caps.update(bundle2.decodecaps(blob))
2363 bundler = bundle2.bundle20(repo.ui, b2caps)
2380 bundler = bundle2.bundle20(repo.ui, b2caps)
2364
2381
2365 kwargs[b'heads'] = heads
2382 kwargs[b'heads'] = heads
2366 kwargs[b'common'] = common
2383 kwargs[b'common'] = common
2367
2384
2368 for name in getbundle2partsorder:
2385 for name in getbundle2partsorder:
2369 func = getbundle2partsmapping[name]
2386 func = getbundle2partsmapping[name]
2370 func(
2387 func(
2371 bundler,
2388 bundler,
2372 repo,
2389 repo,
2373 source,
2390 source,
2374 bundlecaps=bundlecaps,
2391 bundlecaps=bundlecaps,
2375 b2caps=b2caps,
2392 b2caps=b2caps,
2376 remote_sidedata=remote_sidedata,
2393 remote_sidedata=remote_sidedata,
2377 **pycompat.strkwargs(kwargs)
2394 **pycompat.strkwargs(kwargs)
2378 )
2395 )
2379
2396
2380 info[b'prefercompressed'] = bundler.prefercompressed
2397 info[b'prefercompressed'] = bundler.prefercompressed
2381
2398
2382 return info, bundler.getchunks()
2399 return info, bundler.getchunks()
2383
2400
2384
2401
2385 @getbundle2partsgenerator(b'stream2')
2402 @getbundle2partsgenerator(b'stream2')
2386 def _getbundlestream2(bundler, repo, *args, **kwargs):
2403 def _getbundlestream2(bundler, repo, *args, **kwargs):
2387 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2404 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2388
2405
2389
2406
2390 @getbundle2partsgenerator(b'changegroup')
2407 @getbundle2partsgenerator(b'changegroup')
2391 def _getbundlechangegrouppart(
2408 def _getbundlechangegrouppart(
2392 bundler,
2409 bundler,
2393 repo,
2410 repo,
2394 source,
2411 source,
2395 bundlecaps=None,
2412 bundlecaps=None,
2396 b2caps=None,
2413 b2caps=None,
2397 heads=None,
2414 heads=None,
2398 common=None,
2415 common=None,
2399 remote_sidedata=None,
2416 remote_sidedata=None,
2400 **kwargs
2417 **kwargs
2401 ):
2418 ):
2402 """add a changegroup part to the requested bundle"""
2419 """add a changegroup part to the requested bundle"""
2403 if not kwargs.get('cg', True) or not b2caps:
2420 if not kwargs.get('cg', True) or not b2caps:
2404 return
2421 return
2405
2422
2406 version = b'01'
2423 version = b'01'
2407 cgversions = b2caps.get(b'changegroup')
2424 cgversions = b2caps.get(b'changegroup')
2408 if cgversions: # 3.1 and 3.2 ship with an empty value
2425 if cgversions: # 3.1 and 3.2 ship with an empty value
2409 cgversions = [
2426 cgversions = [
2410 v
2427 v
2411 for v in cgversions
2428 for v in cgversions
2412 if v in changegroup.supportedoutgoingversions(repo)
2429 if v in changegroup.supportedoutgoingversions(repo)
2413 ]
2430 ]
2414 if not cgversions:
2431 if not cgversions:
2415 raise error.Abort(_(b'no common changegroup version'))
2432 raise error.Abort(_(b'no common changegroup version'))
2416 version = max(cgversions)
2433 version = max(cgversions)
2417
2434
2418 outgoing = _computeoutgoing(repo, heads, common)
2435 outgoing = _computeoutgoing(repo, heads, common)
2419 if not outgoing.missing:
2436 if not outgoing.missing:
2420 return
2437 return
2421
2438
2422 if kwargs.get('narrow', False):
2439 if kwargs.get('narrow', False):
2423 include = sorted(filter(bool, kwargs.get('includepats', [])))
2440 include = sorted(filter(bool, kwargs.get('includepats', [])))
2424 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2441 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2425 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2442 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2426 else:
2443 else:
2427 matcher = None
2444 matcher = None
2428
2445
2429 cgstream = changegroup.makestream(
2446 cgstream = changegroup.makestream(
2430 repo,
2447 repo,
2431 outgoing,
2448 outgoing,
2432 version,
2449 version,
2433 source,
2450 source,
2434 bundlecaps=bundlecaps,
2451 bundlecaps=bundlecaps,
2435 matcher=matcher,
2452 matcher=matcher,
2436 remote_sidedata=remote_sidedata,
2453 remote_sidedata=remote_sidedata,
2437 )
2454 )
2438
2455
2439 part = bundler.newpart(b'changegroup', data=cgstream)
2456 part = bundler.newpart(b'changegroup', data=cgstream)
2440 if cgversions:
2457 if cgversions:
2441 part.addparam(b'version', version)
2458 part.addparam(b'version', version)
2442
2459
2443 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2460 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2444
2461
2445 if scmutil.istreemanifest(repo):
2462 if scmutil.istreemanifest(repo):
2446 part.addparam(b'treemanifest', b'1')
2463 part.addparam(b'treemanifest', b'1')
2447
2464
2448 if repository.REPO_FEATURE_SIDE_DATA in repo.features:
2465 if repository.REPO_FEATURE_SIDE_DATA in repo.features:
2449 part.addparam(b'exp-sidedata', b'1')
2466 part.addparam(b'exp-sidedata', b'1')
2450 sidedata = bundle2.format_remote_wanted_sidedata(repo)
2467 sidedata = bundle2.format_remote_wanted_sidedata(repo)
2451 part.addparam(b'exp-wanted-sidedata', sidedata)
2468 part.addparam(b'exp-wanted-sidedata', sidedata)
2452
2469
2453 if (
2470 if (
2454 kwargs.get('narrow', False)
2471 kwargs.get('narrow', False)
2455 and kwargs.get('narrow_acl', False)
2472 and kwargs.get('narrow_acl', False)
2456 and (include or exclude)
2473 and (include or exclude)
2457 ):
2474 ):
2458 # this is mandatory because otherwise ACL clients won't work
2475 # this is mandatory because otherwise ACL clients won't work
2459 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2476 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2460 narrowspecpart.data = b'%s\0%s' % (
2477 narrowspecpart.data = b'%s\0%s' % (
2461 b'\n'.join(include),
2478 b'\n'.join(include),
2462 b'\n'.join(exclude),
2479 b'\n'.join(exclude),
2463 )
2480 )
2464
2481
2465
2482
2466 @getbundle2partsgenerator(b'bookmarks')
2483 @getbundle2partsgenerator(b'bookmarks')
2467 def _getbundlebookmarkpart(
2484 def _getbundlebookmarkpart(
2468 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2485 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2469 ):
2486 ):
2470 """add a bookmark part to the requested bundle"""
2487 """add a bookmark part to the requested bundle"""
2471 if not kwargs.get('bookmarks', False):
2488 if not kwargs.get('bookmarks', False):
2472 return
2489 return
2473 if not b2caps or b'bookmarks' not in b2caps:
2490 if not b2caps or b'bookmarks' not in b2caps:
2474 raise error.Abort(_(b'no common bookmarks exchange method'))
2491 raise error.Abort(_(b'no common bookmarks exchange method'))
2475 books = bookmod.listbinbookmarks(repo)
2492 books = bookmod.listbinbookmarks(repo)
2476 data = bookmod.binaryencode(repo, books)
2493 data = bookmod.binaryencode(repo, books)
2477 if data:
2494 if data:
2478 bundler.newpart(b'bookmarks', data=data)
2495 bundler.newpart(b'bookmarks', data=data)
2479
2496
2480
2497
2481 @getbundle2partsgenerator(b'listkeys')
2498 @getbundle2partsgenerator(b'listkeys')
2482 def _getbundlelistkeysparts(
2499 def _getbundlelistkeysparts(
2483 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2500 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2484 ):
2501 ):
2485 """add parts containing listkeys namespaces to the requested bundle"""
2502 """add parts containing listkeys namespaces to the requested bundle"""
2486 listkeys = kwargs.get('listkeys', ())
2503 listkeys = kwargs.get('listkeys', ())
2487 for namespace in listkeys:
2504 for namespace in listkeys:
2488 part = bundler.newpart(b'listkeys')
2505 part = bundler.newpart(b'listkeys')
2489 part.addparam(b'namespace', namespace)
2506 part.addparam(b'namespace', namespace)
2490 keys = repo.listkeys(namespace).items()
2507 keys = repo.listkeys(namespace).items()
2491 part.data = pushkey.encodekeys(keys)
2508 part.data = pushkey.encodekeys(keys)
2492
2509
2493
2510
2494 @getbundle2partsgenerator(b'obsmarkers')
2511 @getbundle2partsgenerator(b'obsmarkers')
2495 def _getbundleobsmarkerpart(
2512 def _getbundleobsmarkerpart(
2496 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2513 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2497 ):
2514 ):
2498 """add an obsolescence markers part to the requested bundle"""
2515 """add an obsolescence markers part to the requested bundle"""
2499 if kwargs.get('obsmarkers', False):
2516 if kwargs.get('obsmarkers', False):
2500 if heads is None:
2517 if heads is None:
2501 heads = repo.heads()
2518 heads = repo.heads()
2502 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2519 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2503 markers = repo.obsstore.relevantmarkers(subset)
2520 markers = repo.obsstore.relevantmarkers(subset)
2504 markers = obsutil.sortedmarkers(markers)
2521 markers = obsutil.sortedmarkers(markers)
2505 bundle2.buildobsmarkerspart(bundler, markers)
2522 bundle2.buildobsmarkerspart(bundler, markers)
2506
2523
2507
2524
2508 @getbundle2partsgenerator(b'phases')
2525 @getbundle2partsgenerator(b'phases')
2509 def _getbundlephasespart(
2526 def _getbundlephasespart(
2510 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2527 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2511 ):
2528 ):
2512 """add phase heads part to the requested bundle"""
2529 """add phase heads part to the requested bundle"""
2513 if kwargs.get('phases', False):
2530 if kwargs.get('phases', False):
2514 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2531 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2515 raise error.Abort(_(b'no common phases exchange method'))
2532 raise error.Abort(_(b'no common phases exchange method'))
2516 if heads is None:
2533 if heads is None:
2517 heads = repo.heads()
2534 heads = repo.heads()
2518
2535
2519 headsbyphase = collections.defaultdict(set)
2536 headsbyphase = collections.defaultdict(set)
2520 if repo.publishing():
2537 if repo.publishing():
2521 headsbyphase[phases.public] = heads
2538 headsbyphase[phases.public] = heads
2522 else:
2539 else:
2523 # find the appropriate heads to move
2540 # find the appropriate heads to move
2524
2541
2525 phase = repo._phasecache.phase
2542 phase = repo._phasecache.phase
2526 node = repo.changelog.node
2543 node = repo.changelog.node
2527 rev = repo.changelog.rev
2544 rev = repo.changelog.rev
2528 for h in heads:
2545 for h in heads:
2529 headsbyphase[phase(repo, rev(h))].add(h)
2546 headsbyphase[phase(repo, rev(h))].add(h)
2530 seenphases = list(headsbyphase.keys())
2547 seenphases = list(headsbyphase.keys())
2531
2548
2532 # We do not handle anything but public and draft phase for now)
2549 # We do not handle anything but public and draft phase for now)
2533 if seenphases:
2550 if seenphases:
2534 assert max(seenphases) <= phases.draft
2551 assert max(seenphases) <= phases.draft
2535
2552
2536 # if client is pulling non-public changesets, we need to find
2553 # if client is pulling non-public changesets, we need to find
2537 # intermediate public heads.
2554 # intermediate public heads.
2538 draftheads = headsbyphase.get(phases.draft, set())
2555 draftheads = headsbyphase.get(phases.draft, set())
2539 if draftheads:
2556 if draftheads:
2540 publicheads = headsbyphase.get(phases.public, set())
2557 publicheads = headsbyphase.get(phases.public, set())
2541
2558
2542 revset = b'heads(only(%ln, %ln) and public())'
2559 revset = b'heads(only(%ln, %ln) and public())'
2543 extraheads = repo.revs(revset, draftheads, publicheads)
2560 extraheads = repo.revs(revset, draftheads, publicheads)
2544 for r in extraheads:
2561 for r in extraheads:
2545 headsbyphase[phases.public].add(node(r))
2562 headsbyphase[phases.public].add(node(r))
2546
2563
2547 # transform data in a format used by the encoding function
2564 # transform data in a format used by the encoding function
2548 phasemapping = {
2565 phasemapping = {
2549 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2566 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2550 }
2567 }
2551
2568
2552 # generate the actual part
2569 # generate the actual part
2553 phasedata = phases.binaryencode(phasemapping)
2570 phasedata = phases.binaryencode(phasemapping)
2554 bundler.newpart(b'phase-heads', data=phasedata)
2571 bundler.newpart(b'phase-heads', data=phasedata)
2555
2572
2556
2573
2557 @getbundle2partsgenerator(b'hgtagsfnodes')
2574 @getbundle2partsgenerator(b'hgtagsfnodes')
2558 def _getbundletagsfnodes(
2575 def _getbundletagsfnodes(
2559 bundler,
2576 bundler,
2560 repo,
2577 repo,
2561 source,
2578 source,
2562 bundlecaps=None,
2579 bundlecaps=None,
2563 b2caps=None,
2580 b2caps=None,
2564 heads=None,
2581 heads=None,
2565 common=None,
2582 common=None,
2566 **kwargs
2583 **kwargs
2567 ):
2584 ):
2568 """Transfer the .hgtags filenodes mapping.
2585 """Transfer the .hgtags filenodes mapping.
2569
2586
2570 Only values for heads in this bundle will be transferred.
2587 Only values for heads in this bundle will be transferred.
2571
2588
2572 The part data consists of pairs of 20 byte changeset node and .hgtags
2589 The part data consists of pairs of 20 byte changeset node and .hgtags
2573 filenodes raw values.
2590 filenodes raw values.
2574 """
2591 """
2575 # Don't send unless:
2592 # Don't send unless:
2576 # - changeset are being exchanged,
2593 # - changeset are being exchanged,
2577 # - the client supports it.
2594 # - the client supports it.
2578 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2595 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2579 return
2596 return
2580
2597
2581 outgoing = _computeoutgoing(repo, heads, common)
2598 outgoing = _computeoutgoing(repo, heads, common)
2582 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2599 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2583
2600
2584
2601
2585 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2602 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2586 def _getbundlerevbranchcache(
2603 def _getbundlerevbranchcache(
2587 bundler,
2604 bundler,
2588 repo,
2605 repo,
2589 source,
2606 source,
2590 bundlecaps=None,
2607 bundlecaps=None,
2591 b2caps=None,
2608 b2caps=None,
2592 heads=None,
2609 heads=None,
2593 common=None,
2610 common=None,
2594 **kwargs
2611 **kwargs
2595 ):
2612 ):
2596 """Transfer the rev-branch-cache mapping
2613 """Transfer the rev-branch-cache mapping
2597
2614
2598 The payload is a series of data related to each branch
2615 The payload is a series of data related to each branch
2599
2616
2600 1) branch name length
2617 1) branch name length
2601 2) number of open heads
2618 2) number of open heads
2602 3) number of closed heads
2619 3) number of closed heads
2603 4) open heads nodes
2620 4) open heads nodes
2604 5) closed heads nodes
2621 5) closed heads nodes
2605 """
2622 """
2606 # Don't send unless:
2623 # Don't send unless:
2607 # - changeset are being exchanged,
2624 # - changeset are being exchanged,
2608 # - the client supports it.
2625 # - the client supports it.
2609 # - narrow bundle isn't in play (not currently compatible).
2626 # - narrow bundle isn't in play (not currently compatible).
2610 if (
2627 if (
2611 not kwargs.get('cg', True)
2628 not kwargs.get('cg', True)
2612 or not b2caps
2629 or not b2caps
2613 or b'rev-branch-cache' not in b2caps
2630 or b'rev-branch-cache' not in b2caps
2614 or kwargs.get('narrow', False)
2631 or kwargs.get('narrow', False)
2615 or repo.ui.has_section(_NARROWACL_SECTION)
2632 or repo.ui.has_section(_NARROWACL_SECTION)
2616 ):
2633 ):
2617 return
2634 return
2618
2635
2619 outgoing = _computeoutgoing(repo, heads, common)
2636 outgoing = _computeoutgoing(repo, heads, common)
2620 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2637 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2621
2638
2622
2639
2623 def check_heads(repo, their_heads, context):
2640 def check_heads(repo, their_heads, context):
2624 """check if the heads of a repo have been modified
2641 """check if the heads of a repo have been modified
2625
2642
2626 Used by peer for unbundling.
2643 Used by peer for unbundling.
2627 """
2644 """
2628 heads = repo.heads()
2645 heads = repo.heads()
2629 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2646 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2630 if not (
2647 if not (
2631 their_heads == [b'force']
2648 their_heads == [b'force']
2632 or their_heads == heads
2649 or their_heads == heads
2633 or their_heads == [b'hashed', heads_hash]
2650 or their_heads == [b'hashed', heads_hash]
2634 ):
2651 ):
2635 # someone else committed/pushed/unbundled while we
2652 # someone else committed/pushed/unbundled while we
2636 # were transferring data
2653 # were transferring data
2637 raise error.PushRaced(
2654 raise error.PushRaced(
2638 b'repository changed while %s - please try again' % context
2655 b'repository changed while %s - please try again' % context
2639 )
2656 )
2640
2657
2641
2658
2642 def unbundle(repo, cg, heads, source, url):
2659 def unbundle(repo, cg, heads, source, url):
2643 """Apply a bundle to a repo.
2660 """Apply a bundle to a repo.
2644
2661
2645 this function makes sure the repo is locked during the application and have
2662 this function makes sure the repo is locked during the application and have
2646 mechanism to check that no push race occurred between the creation of the
2663 mechanism to check that no push race occurred between the creation of the
2647 bundle and its application.
2664 bundle and its application.
2648
2665
2649 If the push was raced as PushRaced exception is raised."""
2666 If the push was raced as PushRaced exception is raised."""
2650 r = 0
2667 r = 0
2651 # need a transaction when processing a bundle2 stream
2668 # need a transaction when processing a bundle2 stream
2652 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2669 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2653 lockandtr = [None, None, None]
2670 lockandtr = [None, None, None]
2654 recordout = None
2671 recordout = None
2655 # quick fix for output mismatch with bundle2 in 3.4
2672 # quick fix for output mismatch with bundle2 in 3.4
2656 captureoutput = repo.ui.configbool(
2673 captureoutput = repo.ui.configbool(
2657 b'experimental', b'bundle2-output-capture'
2674 b'experimental', b'bundle2-output-capture'
2658 )
2675 )
2659 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2676 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2660 captureoutput = True
2677 captureoutput = True
2661 try:
2678 try:
2662 # note: outside bundle1, 'heads' is expected to be empty and this
2679 # note: outside bundle1, 'heads' is expected to be empty and this
2663 # 'check_heads' call wil be a no-op
2680 # 'check_heads' call wil be a no-op
2664 check_heads(repo, heads, b'uploading changes')
2681 check_heads(repo, heads, b'uploading changes')
2665 # push can proceed
2682 # push can proceed
2666 if not isinstance(cg, bundle2.unbundle20):
2683 if not isinstance(cg, bundle2.unbundle20):
2667 # legacy case: bundle1 (changegroup 01)
2684 # legacy case: bundle1 (changegroup 01)
2668 txnname = b"\n".join([source, urlutil.hidepassword(url)])
2685 txnname = b"\n".join([source, urlutil.hidepassword(url)])
2669 with repo.lock(), repo.transaction(txnname) as tr:
2686 with repo.lock(), repo.transaction(txnname) as tr:
2670 op = bundle2.applybundle(repo, cg, tr, source, url)
2687 op = bundle2.applybundle(repo, cg, tr, source, url)
2671 r = bundle2.combinechangegroupresults(op)
2688 r = bundle2.combinechangegroupresults(op)
2672 else:
2689 else:
2673 r = None
2690 r = None
2674 try:
2691 try:
2675
2692
2676 def gettransaction():
2693 def gettransaction():
2677 if not lockandtr[2]:
2694 if not lockandtr[2]:
2678 if not bookmod.bookmarksinstore(repo):
2695 if not bookmod.bookmarksinstore(repo):
2679 lockandtr[0] = repo.wlock()
2696 lockandtr[0] = repo.wlock()
2680 lockandtr[1] = repo.lock()
2697 lockandtr[1] = repo.lock()
2681 lockandtr[2] = repo.transaction(source)
2698 lockandtr[2] = repo.transaction(source)
2682 lockandtr[2].hookargs[b'source'] = source
2699 lockandtr[2].hookargs[b'source'] = source
2683 lockandtr[2].hookargs[b'url'] = url
2700 lockandtr[2].hookargs[b'url'] = url
2684 lockandtr[2].hookargs[b'bundle2'] = b'1'
2701 lockandtr[2].hookargs[b'bundle2'] = b'1'
2685 return lockandtr[2]
2702 return lockandtr[2]
2686
2703
2687 # Do greedy locking by default until we're satisfied with lazy
2704 # Do greedy locking by default until we're satisfied with lazy
2688 # locking.
2705 # locking.
2689 if not repo.ui.configbool(
2706 if not repo.ui.configbool(
2690 b'experimental', b'bundle2lazylocking'
2707 b'experimental', b'bundle2lazylocking'
2691 ):
2708 ):
2692 gettransaction()
2709 gettransaction()
2693
2710
2694 op = bundle2.bundleoperation(
2711 op = bundle2.bundleoperation(
2695 repo,
2712 repo,
2696 gettransaction,
2713 gettransaction,
2697 captureoutput=captureoutput,
2714 captureoutput=captureoutput,
2698 source=b'push',
2715 source=b'push',
2699 )
2716 )
2700 try:
2717 try:
2701 op = bundle2.processbundle(repo, cg, op=op)
2718 op = bundle2.processbundle(repo, cg, op=op)
2702 finally:
2719 finally:
2703 r = op.reply
2720 r = op.reply
2704 if captureoutput and r is not None:
2721 if captureoutput and r is not None:
2705 repo.ui.pushbuffer(error=True, subproc=True)
2722 repo.ui.pushbuffer(error=True, subproc=True)
2706
2723
2707 def recordout(output):
2724 def recordout(output):
2708 r.newpart(b'output', data=output, mandatory=False)
2725 r.newpart(b'output', data=output, mandatory=False)
2709
2726
2710 if lockandtr[2] is not None:
2727 if lockandtr[2] is not None:
2711 lockandtr[2].close()
2728 lockandtr[2].close()
2712 except BaseException as exc:
2729 except BaseException as exc:
2713 exc.duringunbundle2 = True
2730 exc.duringunbundle2 = True
2714 if captureoutput and r is not None:
2731 if captureoutput and r is not None:
2715 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2732 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2716
2733
2717 def recordout(output):
2734 def recordout(output):
2718 part = bundle2.bundlepart(
2735 part = bundle2.bundlepart(
2719 b'output', data=output, mandatory=False
2736 b'output', data=output, mandatory=False
2720 )
2737 )
2721 parts.append(part)
2738 parts.append(part)
2722
2739
2723 raise
2740 raise
2724 finally:
2741 finally:
2725 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2742 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2726 if recordout is not None:
2743 if recordout is not None:
2727 recordout(repo.ui.popbuffer())
2744 recordout(repo.ui.popbuffer())
2728 return r
2745 return r
2729
2746
2730
2747
2731 def _maybeapplyclonebundle(pullop):
2748 def _maybeapplyclonebundle(pullop):
2732 """Apply a clone bundle from a remote, if possible."""
2749 """Apply a clone bundle from a remote, if possible."""
2733
2750
2734 repo = pullop.repo
2751 repo = pullop.repo
2735 remote = pullop.remote
2752 remote = pullop.remote
2736
2753
2737 if not repo.ui.configbool(b'ui', b'clonebundles'):
2754 if not repo.ui.configbool(b'ui', b'clonebundles'):
2738 return
2755 return
2739
2756
2740 # Only run if local repo is empty.
2757 # Only run if local repo is empty.
2741 if len(repo):
2758 if len(repo):
2742 return
2759 return
2743
2760
2744 if pullop.heads:
2761 if pullop.heads:
2745 return
2762 return
2746
2763
2747 if not remote.capable(b'clonebundles'):
2764 if not remote.capable(b'clonebundles'):
2748 return
2765 return
2749
2766
2750 with remote.commandexecutor() as e:
2767 with remote.commandexecutor() as e:
2751 res = e.callcommand(b'clonebundles', {}).result()
2768 res = e.callcommand(b'clonebundles', {}).result()
2752
2769
2753 # If we call the wire protocol command, that's good enough to record the
2770 # If we call the wire protocol command, that's good enough to record the
2754 # attempt.
2771 # attempt.
2755 pullop.clonebundleattempted = True
2772 pullop.clonebundleattempted = True
2756
2773
2757 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2774 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2758 if not entries:
2775 if not entries:
2759 repo.ui.note(
2776 repo.ui.note(
2760 _(
2777 _(
2761 b'no clone bundles available on remote; '
2778 b'no clone bundles available on remote; '
2762 b'falling back to regular clone\n'
2779 b'falling back to regular clone\n'
2763 )
2780 )
2764 )
2781 )
2765 return
2782 return
2766
2783
2767 entries = bundlecaches.filterclonebundleentries(
2784 entries = bundlecaches.filterclonebundleentries(
2768 repo, entries, streamclonerequested=pullop.streamclonerequested
2785 repo, entries, streamclonerequested=pullop.streamclonerequested
2769 )
2786 )
2770
2787
2771 if not entries:
2788 if not entries:
2772 # There is a thundering herd concern here. However, if a server
2789 # There is a thundering herd concern here. However, if a server
2773 # operator doesn't advertise bundles appropriate for its clients,
2790 # operator doesn't advertise bundles appropriate for its clients,
2774 # they deserve what's coming. Furthermore, from a client's
2791 # they deserve what's coming. Furthermore, from a client's
2775 # perspective, no automatic fallback would mean not being able to
2792 # perspective, no automatic fallback would mean not being able to
2776 # clone!
2793 # clone!
2777 repo.ui.warn(
2794 repo.ui.warn(
2778 _(
2795 _(
2779 b'no compatible clone bundles available on server; '
2796 b'no compatible clone bundles available on server; '
2780 b'falling back to regular clone\n'
2797 b'falling back to regular clone\n'
2781 )
2798 )
2782 )
2799 )
2783 repo.ui.warn(
2800 repo.ui.warn(
2784 _(b'(you may want to report this to the server operator)\n')
2801 _(b'(you may want to report this to the server operator)\n')
2785 )
2802 )
2786 return
2803 return
2787
2804
2788 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2805 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2789
2806
2790 url = entries[0][b'URL']
2807 url = entries[0][b'URL']
2791 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2808 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2792 if trypullbundlefromurl(repo.ui, repo, url):
2809 if trypullbundlefromurl(repo.ui, repo, url):
2793 repo.ui.status(_(b'finished applying clone bundle\n'))
2810 repo.ui.status(_(b'finished applying clone bundle\n'))
2794 # Bundle failed.
2811 # Bundle failed.
2795 #
2812 #
2796 # We abort by default to avoid the thundering herd of
2813 # We abort by default to avoid the thundering herd of
2797 # clients flooding a server that was expecting expensive
2814 # clients flooding a server that was expecting expensive
2798 # clone load to be offloaded.
2815 # clone load to be offloaded.
2799 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2816 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2800 repo.ui.warn(_(b'falling back to normal clone\n'))
2817 repo.ui.warn(_(b'falling back to normal clone\n'))
2801 else:
2818 else:
2802 raise error.Abort(
2819 raise error.Abort(
2803 _(b'error applying bundle'),
2820 _(b'error applying bundle'),
2804 hint=_(
2821 hint=_(
2805 b'if this error persists, consider contacting '
2822 b'if this error persists, consider contacting '
2806 b'the server operator or disable clone '
2823 b'the server operator or disable clone '
2807 b'bundles via '
2824 b'bundles via '
2808 b'"--config ui.clonebundles=false"'
2825 b'"--config ui.clonebundles=false"'
2809 ),
2826 ),
2810 )
2827 )
2811
2828
2812
2829
2813 def trypullbundlefromurl(ui, repo, url):
2830 def trypullbundlefromurl(ui, repo, url):
2814 """Attempt to apply a bundle from a URL."""
2831 """Attempt to apply a bundle from a URL."""
2815 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2832 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2816 try:
2833 try:
2817 fh = urlmod.open(ui, url)
2834 fh = urlmod.open(ui, url)
2818 cg = readbundle(ui, fh, b'stream')
2835 cg = readbundle(ui, fh, b'stream')
2819
2836
2820 if isinstance(cg, streamclone.streamcloneapplier):
2837 if isinstance(cg, streamclone.streamcloneapplier):
2821 cg.apply(repo)
2838 cg.apply(repo)
2822 else:
2839 else:
2823 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2840 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2824 return True
2841 return True
2825 except urlerr.httperror as e:
2842 except urlerr.httperror as e:
2826 ui.warn(
2843 ui.warn(
2827 _(b'HTTP error fetching bundle: %s\n')
2844 _(b'HTTP error fetching bundle: %s\n')
2828 % stringutil.forcebytestr(e)
2845 % stringutil.forcebytestr(e)
2829 )
2846 )
2830 except urlerr.urlerror as e:
2847 except urlerr.urlerror as e:
2831 ui.warn(
2848 ui.warn(
2832 _(b'error fetching bundle: %s\n')
2849 _(b'error fetching bundle: %s\n')
2833 % stringutil.forcebytestr(e.reason)
2850 % stringutil.forcebytestr(e.reason)
2834 )
2851 )
2835
2852
2836 return False
2853 return False
@@ -1,270 +1,268
1 bundle w/o type option
1 bundle w/o type option
2
2
3 $ hg init t1
3 $ hg init t1
4 $ hg init t2
4 $ hg init t2
5 $ cd t1
5 $ cd t1
6 $ echo blablablablabla > file.txt
6 $ echo blablablablabla > file.txt
7 $ hg ci -Ama
7 $ hg ci -Ama
8 adding file.txt
8 adding file.txt
9 $ hg log | grep summary
9 $ hg log | grep summary
10 summary: a
10 summary: a
11 $ hg bundle ../b1 ../t2
11 $ hg bundle ../b1 ../t2
12 searching for changes
12 searching for changes
13 1 changesets found
13 1 changesets found
14
14
15 $ cd ../t2
15 $ cd ../t2
16 $ hg unbundle ../b1
16 $ hg unbundle ../b1
17 adding changesets
17 adding changesets
18 adding manifests
18 adding manifests
19 adding file changes
19 adding file changes
20 added 1 changesets with 1 changes to 1 files
20 added 1 changesets with 1 changes to 1 files
21 new changesets c35a0f9217e6 (1 drafts)
21 new changesets c35a0f9217e6 (1 drafts)
22 (run 'hg update' to get a working copy)
22 (run 'hg update' to get a working copy)
23 $ hg up
23 $ hg up
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25 $ hg log | grep summary
25 $ hg log | grep summary
26 summary: a
26 summary: a
27 $ cd ..
27 $ cd ..
28
28
29 Unknown compression type is rejected
29 Unknown compression type is rejected
30
30
31 $ hg init t3
31 $ hg init t3
32 $ cd t3
32 $ cd t3
33 $ hg -q unbundle ../b1
33 $ hg -q unbundle ../b1
34 $ hg bundle -a -t unknown out.hg
34 $ hg bundle -a -t unknown out.hg
35 abort: unknown is not a recognized bundle specification
35 abort: unknown is not a recognized bundle specification
36 (see 'hg help bundlespec' for supported values for --type)
36 (see 'hg help bundlespec' for supported values for --type)
37 [10]
37 [10]
38
38
39 $ hg bundle -a -t unknown-v2 out.hg
39 $ hg bundle -a -t unknown-v2 out.hg
40 abort: unknown compression is not supported
40 abort: unknown compression is not supported
41 (see 'hg help bundlespec' for supported values for --type)
41 (see 'hg help bundlespec' for supported values for --type)
42 [10]
42 [10]
43
43
44 $ cd ..
44 $ cd ..
45
45
46 test bundle types
46 test bundle types
47
47
48 $ testbundle() {
48 $ testbundle() {
49 > echo % test bundle type $1
49 > echo % test bundle type $1
50 > hg init t$1
50 > hg init t$1
51 > cd t1
51 > cd t1
52 > hg bundle -t $1 ../b$1 ../t$1
52 > hg bundle -t $1 ../b$1 ../t$1
53 > f -q -B6 -D ../b$1; echo
53 > f -q -B6 -D ../b$1; echo
54 > cd ../t$1
54 > cd ../t$1
55 > hg debugbundle ../b$1
55 > hg debugbundle ../b$1
56 > hg debugbundle --spec ../b$1
56 > hg debugbundle --spec ../b$1
57 > echo
57 > echo
58 > cd ..
58 > cd ..
59 > }
59 > }
60
60
61 $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
61 $ for t in "None" "bzip2" "gzip" "none-v2" "v2" "v1" "gzip-v1"; do
62 > testbundle $t
62 > testbundle $t
63 > done
63 > done
64 % test bundle type None
64 % test bundle type None
65 searching for changes
65 searching for changes
66 1 changesets found
66 1 changesets found
67 HG20\x00\x00 (esc)
67 HG20\x00\x00 (esc)
68 Stream params: {}
68 Stream params: {}
69 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
69 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
70 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
70 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
71 cache:rev-branch-cache -- {} (mandatory: False)
71 cache:rev-branch-cache -- {} (mandatory: False)
72 none-v2
72 none-v2
73
73
74 % test bundle type bzip2
74 % test bundle type bzip2
75 searching for changes
75 searching for changes
76 1 changesets found
76 1 changesets found
77 HG20\x00\x00 (esc)
77 HG20\x00\x00 (esc)
78 Stream params: {Compression: BZ}
78 Stream params: {Compression: BZ}
79 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
79 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
80 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
80 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
81 cache:rev-branch-cache -- {} (mandatory: False)
81 cache:rev-branch-cache -- {} (mandatory: False)
82 bzip2-v2
82 bzip2-v2
83
83
84 % test bundle type gzip
84 % test bundle type gzip
85 searching for changes
85 searching for changes
86 1 changesets found
86 1 changesets found
87 HG20\x00\x00 (esc)
87 HG20\x00\x00 (esc)
88 Stream params: {Compression: GZ}
88 Stream params: {Compression: GZ}
89 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
89 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
90 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
90 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
91 cache:rev-branch-cache -- {} (mandatory: False)
91 cache:rev-branch-cache -- {} (mandatory: False)
92 gzip-v2
92 gzip-v2
93
93
94 % test bundle type none-v2
94 % test bundle type none-v2
95 searching for changes
95 searching for changes
96 1 changesets found
96 1 changesets found
97 HG20\x00\x00 (esc)
97 HG20\x00\x00 (esc)
98 Stream params: {}
98 Stream params: {}
99 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
99 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
100 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
100 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
101 cache:rev-branch-cache -- {} (mandatory: False)
101 cache:rev-branch-cache -- {} (mandatory: False)
102 none-v2
102 none-v2
103
103
104 % test bundle type v2
104 % test bundle type v2
105 searching for changes
105 searching for changes
106 1 changesets found
106 1 changesets found
107 HG20\x00\x00 (esc)
107 HG20\x00\x00 (esc)
108 Stream params: {Compression: BZ}
108 Stream params: {Compression: BZ}
109 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
109 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
110 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
110 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
111 cache:rev-branch-cache -- {} (mandatory: False)
111 cache:rev-branch-cache -- {} (mandatory: False)
112 bzip2-v2
112 bzip2-v2
113
113
114 % test bundle type v1
114 % test bundle type v1
115 searching for changes
115 searching for changes
116 1 changesets found
116 1 changesets found
117 HG10BZ
117 HG10BZ
118 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
118 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
119 bzip2-v1
119 bzip2-v1
120
120
121 % test bundle type gzip-v1
121 % test bundle type gzip-v1
122 searching for changes
122 searching for changes
123 1 changesets found
123 1 changesets found
124 HG10GZ
124 HG10GZ
125 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
125 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
126 gzip-v1
126 gzip-v1
127
127
128
128
129 Compression level can be adjusted for bundle2 bundles
129 Compression level can be adjusted for bundle2 bundles
130
130
131 $ hg init test-complevel
131 $ hg init test-complevel
132 $ cd test-complevel
132 $ cd test-complevel
133
133
134 $ cat > file0 << EOF
134 $ cat > file0 << EOF
135 > this is a file
135 > this is a file
136 > with some text
136 > with some text
137 > and some more text
137 > and some more text
138 > and other content
138 > and other content
139 > EOF
139 > EOF
140 $ cat > file1 << EOF
140 $ cat > file1 << EOF
141 > this is another file
141 > this is another file
142 > with some other content
142 > with some other content
143 > and repeated, repeated, repeated, repeated content
143 > and repeated, repeated, repeated, repeated content
144 > EOF
144 > EOF
145 $ hg -q commit -A -m initial
145 $ hg -q commit -A -m initial
146
146
147 $ hg bundle -a -t gzip-v2 gzip-v2.hg
147 $ hg bundle -a -t gzip-v2 gzip-v2.hg
148 1 changesets found
148 1 changesets found
149 $ f --size gzip-v2.hg
149 $ f --size gzip-v2.hg
150 gzip-v2.hg: size=468
150 gzip-v2.hg: size=468
151
151
152 $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
152 $ hg --config experimental.bundlecomplevel=1 bundle -a -t gzip-v2 gzip-v2-level1.hg
153 1 changesets found
153 1 changesets found
154 $ f --size gzip-v2-level1.hg
154 $ f --size gzip-v2-level1.hg
155 gzip-v2-level1.hg: size=475
155 gzip-v2-level1.hg: size=475
156
156
157 $ hg --config experimental.bundlecomplevel.gzip=1 --config experimental.bundlelevel=9 bundle -a -t gzip-v2 gzip-v2-level1.hg
157 $ hg --config experimental.bundlecomplevel.gzip=1 --config experimental.bundlelevel=9 bundle -a -t gzip-v2 gzip-v2-level1.hg
158 1 changesets found
158 1 changesets found
159 $ f --size gzip-v2-level1.hg
159 $ f --size gzip-v2-level1.hg
160 gzip-v2-level1.hg: size=475
160 gzip-v2-level1.hg: size=475
161
161
162 $ cd ..
162 $ cd ..
163
163
164 #if zstd
164 #if zstd
165
165
166 $ for t in "zstd" "zstd-v2"; do
166 $ for t in "zstd" "zstd-v2"; do
167 > testbundle $t
167 > testbundle $t
168 > done
168 > done
169 % test bundle type zstd
169 % test bundle type zstd
170 searching for changes
170 searching for changes
171 1 changesets found
171 1 changesets found
172 HG20\x00\x00 (esc)
172 HG20\x00\x00 (esc)
173 Stream params: {Compression: ZS}
173 Stream params: {Compression: ZS}
174 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
174 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
175 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
175 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
176 cache:rev-branch-cache -- {} (mandatory: False)
176 cache:rev-branch-cache -- {} (mandatory: False)
177 zstd-v2
177 zstd-v2
178
178
179 % test bundle type zstd-v2
179 % test bundle type zstd-v2
180 searching for changes
180 searching for changes
181 1 changesets found
181 1 changesets found
182 HG20\x00\x00 (esc)
182 HG20\x00\x00 (esc)
183 Stream params: {Compression: ZS}
183 Stream params: {Compression: ZS}
184 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
184 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
185 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
185 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
186 cache:rev-branch-cache -- {} (mandatory: False)
186 cache:rev-branch-cache -- {} (mandatory: False)
187 zstd-v2
187 zstd-v2
188
188
189
189
190 Explicit request for zstd on non-generaldelta repos
190 Explicit request for zstd on non-generaldelta repos
191
191
192 $ hg --config format.usegeneraldelta=false init nogd
192 $ hg --config format.usegeneraldelta=false init nogd
193 $ hg -q -R nogd pull t1
193 $ hg -q -R nogd pull t1
194 $ hg -R nogd bundle -a -t zstd nogd-zstd
194 $ hg -R nogd bundle -a -t zstd nogd-zstd
195 1 changesets found
195 1 changesets found
196
196
197 zstd-v1 always fails
197 zstd-v1 always fails
198
198
199 $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
199 $ hg -R tzstd bundle -a -t zstd-v1 zstd-v1
200 abort: compression engine zstd is not supported on v1 bundles
200 abort: compression engine zstd is not supported on v1 bundles
201 (see 'hg help bundlespec' for supported values for --type)
201 (see 'hg help bundlespec' for supported values for --type)
202 [10]
202 [10]
203
203
204 zstd supports threading
204 zstd supports threading
205
205
206 $ hg init test-compthreads
206 $ hg init test-compthreads
207 $ cd test-compthreads
207 $ cd test-compthreads
208 $ hg debugbuilddag +3
208 $ hg debugbuilddag +3
209 $ hg --config experimental.bundlecompthreads=1 bundle -a -t zstd-v2 zstd-v2-threaded.hg
209 $ hg --config experimental.bundlecompthreads=1 bundle -a -t zstd-v2 zstd-v2-threaded.hg
210 3 changesets found
210 3 changesets found
211 $ cd ..
211 $ cd ..
212
212
213 #else
213 #else
214
214
215 zstd is a valid engine but isn't available
215 zstd is a valid engine but isn't available
216
216
217 $ hg -R t1 bundle -a -t zstd irrelevant.hg
217 $ hg -R t1 bundle -a -t zstd irrelevant.hg
218 abort: compression engine zstd could not be loaded
218 abort: compression engine zstd could not be loaded
219 [255]
219 [255]
220
220
221 #endif
221 #endif
222
222
223 test garbage file
223 test garbage file
224
224
225 $ echo garbage > bgarbage
225 $ echo garbage > bgarbage
226 $ hg init tgarbage
226 $ hg init tgarbage
227 $ cd tgarbage
227 $ cd tgarbage
228 $ hg pull ../bgarbage
228 $ hg pull ../bgarbage
229 pulling from ../bgarbage
229 pulling from ../bgarbage
230 abort: ../bgarbage: not a Mercurial bundle
230 abort: ../bgarbage: not a Mercurial bundle
231 [255]
231 [255]
232 $ cd ..
232 $ cd ..
233
233
234 test invalid bundle type
234 test invalid bundle type
235
235
236 $ cd t1
236 $ cd t1
237 $ hg bundle -a -t garbage ../bgarbage
237 $ hg bundle -a -t garbage ../bgarbage
238 abort: garbage is not a recognized bundle specification
238 abort: garbage is not a recognized bundle specification
239 (see 'hg help bundlespec' for supported values for --type)
239 (see 'hg help bundlespec' for supported values for --type)
240 [10]
240 [10]
241 $ cd ..
241 $ cd ..
242
242
243 Test controlling the changegroup version
243 Test controlling the changegroup version
244
244
245 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t v2 ./v2-cg-default.hg
245 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t v2 ./v2-cg-default.hg
246 1 changesets found
246 1 changesets found
247 $ hg debugbundle ./v2-cg-default.hg --part-type changegroup
247 $ hg debugbundle ./v2-cg-default.hg --part-type changegroup
248 Stream params: {Compression: BZ}
248 Stream params: {Compression: BZ}
249 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
249 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
250 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
250 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
251 $ hg debugbundle ./v2-cg-default.hg --spec
251 $ hg debugbundle ./v2-cg-default.hg --spec
252 bzip2-v2
252 bzip2-v2
253 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=02' ./v2-cg-02.hg
253 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=02' ./v2-cg-02.hg
254 1 changesets found
254 1 changesets found
255 $ hg debugbundle ./v2-cg-02.hg --part-type changegroup
255 $ hg debugbundle ./v2-cg-02.hg --part-type changegroup
256 Stream params: {Compression: BZ}
256 Stream params: {Compression: BZ}
257 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
257 changegroup -- {nbchanges: 1, version: 02} (mandatory: True)
258 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
258 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
259 $ hg debugbundle ./v2-cg-02.hg --spec
259 $ hg debugbundle ./v2-cg-02.hg --spec
260 bzip2-v2
260 bzip2-v2
261 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=03' ./v2-cg-03.hg
261 $ hg -R t1 bundle --config experimental.changegroup3=yes -a -t 'v2;cg.version=03' ./v2-cg-03.hg
262 1 changesets found
262 1 changesets found
263 $ hg debugbundle ./v2-cg-03.hg --part-type changegroup
263 $ hg debugbundle ./v2-cg-03.hg --part-type changegroup
264 Stream params: {Compression: BZ}
264 Stream params: {Compression: BZ}
265 changegroup -- {nbchanges: 1, version: 03} (mandatory: True)
265 changegroup -- {nbchanges: 1, version: 03} (mandatory: True)
266 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
266 c35a0f9217e65d1fdb90c936ffa7dbe679f83ddf
267 $ hg debugbundle ./v2-cg-03.hg --spec
267 $ hg debugbundle ./v2-cg-03.hg --spec
268 abort: changegroup version 03 does not have a known bundlespec (known-bad-output !)
268 bzip2-v2;cg.version=03
269 (try upgrading your Mercurial client) (known-bad-output !)
270 [255]
General Comments 0
You need to be logged in to leave comments. Login now