##// END OF EJS Templates
exchange: guard against method invocation on `b2caps=None` args...
Matt Harbison -
r44158:0dbea180 default draft
parent child Browse files
Show More
@@ -1,3084 +1,3085 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .thirdparty import attr
19 from .thirdparty import attr
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 obsutil,
31 obsutil,
32 phases,
32 phases,
33 pushkey,
33 pushkey,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 sslutil,
36 sslutil,
37 streamclone,
37 streamclone,
38 url as urlmod,
38 url as urlmod,
39 util,
39 util,
40 wireprototypes,
40 wireprototypes,
41 )
41 )
42 from .interfaces import repository
42 from .interfaces import repository
43 from .utils import stringutil
43 from .utils import stringutil
44
44
45 urlerr = util.urlerr
45 urlerr = util.urlerr
46 urlreq = util.urlreq
46 urlreq = util.urlreq
47
47
48 _NARROWACL_SECTION = b'narrowacl'
48 _NARROWACL_SECTION = b'narrowacl'
49
49
50 # Maps bundle version human names to changegroup versions.
50 # Maps bundle version human names to changegroup versions.
51 _bundlespeccgversions = {
51 _bundlespeccgversions = {
52 b'v1': b'01',
52 b'v1': b'01',
53 b'v2': b'02',
53 b'v2': b'02',
54 b'packed1': b's1',
54 b'packed1': b's1',
55 b'bundle2': b'02', # legacy
55 b'bundle2': b'02', # legacy
56 }
56 }
57
57
58 # Maps bundle version with content opts to choose which part to bundle
58 # Maps bundle version with content opts to choose which part to bundle
59 _bundlespeccontentopts = {
59 _bundlespeccontentopts = {
60 b'v1': {
60 b'v1': {
61 b'changegroup': True,
61 b'changegroup': True,
62 b'cg.version': b'01',
62 b'cg.version': b'01',
63 b'obsolescence': False,
63 b'obsolescence': False,
64 b'phases': False,
64 b'phases': False,
65 b'tagsfnodescache': False,
65 b'tagsfnodescache': False,
66 b'revbranchcache': False,
66 b'revbranchcache': False,
67 },
67 },
68 b'v2': {
68 b'v2': {
69 b'changegroup': True,
69 b'changegroup': True,
70 b'cg.version': b'02',
70 b'cg.version': b'02',
71 b'obsolescence': False,
71 b'obsolescence': False,
72 b'phases': False,
72 b'phases': False,
73 b'tagsfnodescache': True,
73 b'tagsfnodescache': True,
74 b'revbranchcache': True,
74 b'revbranchcache': True,
75 },
75 },
76 b'packed1': {b'cg.version': b's1'},
76 b'packed1': {b'cg.version': b's1'},
77 }
77 }
78 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
78 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
79
79
80 _bundlespecvariants = {
80 _bundlespecvariants = {
81 b"streamv2": {
81 b"streamv2": {
82 b"changegroup": False,
82 b"changegroup": False,
83 b"streamv2": True,
83 b"streamv2": True,
84 b"tagsfnodescache": False,
84 b"tagsfnodescache": False,
85 b"revbranchcache": False,
85 b"revbranchcache": False,
86 }
86 }
87 }
87 }
88
88
89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
90 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
91
91
92
92
93 @attr.s
93 @attr.s
94 class bundlespec(object):
94 class bundlespec(object):
95 compression = attr.ib()
95 compression = attr.ib()
96 wirecompression = attr.ib()
96 wirecompression = attr.ib()
97 version = attr.ib()
97 version = attr.ib()
98 wireversion = attr.ib()
98 wireversion = attr.ib()
99 params = attr.ib()
99 params = attr.ib()
100 contentopts = attr.ib()
100 contentopts = attr.ib()
101
101
102
102
103 def parsebundlespec(repo, spec, strict=True):
103 def parsebundlespec(repo, spec, strict=True):
104 """Parse a bundle string specification into parts.
104 """Parse a bundle string specification into parts.
105
105
106 Bundle specifications denote a well-defined bundle/exchange format.
106 Bundle specifications denote a well-defined bundle/exchange format.
107 The content of a given specification should not change over time in
107 The content of a given specification should not change over time in
108 order to ensure that bundles produced by a newer version of Mercurial are
108 order to ensure that bundles produced by a newer version of Mercurial are
109 readable from an older version.
109 readable from an older version.
110
110
111 The string currently has the form:
111 The string currently has the form:
112
112
113 <compression>-<type>[;<parameter0>[;<parameter1>]]
113 <compression>-<type>[;<parameter0>[;<parameter1>]]
114
114
115 Where <compression> is one of the supported compression formats
115 Where <compression> is one of the supported compression formats
116 and <type> is (currently) a version string. A ";" can follow the type and
116 and <type> is (currently) a version string. A ";" can follow the type and
117 all text afterwards is interpreted as URI encoded, ";" delimited key=value
117 all text afterwards is interpreted as URI encoded, ";" delimited key=value
118 pairs.
118 pairs.
119
119
120 If ``strict`` is True (the default) <compression> is required. Otherwise,
120 If ``strict`` is True (the default) <compression> is required. Otherwise,
121 it is optional.
121 it is optional.
122
122
123 Returns a bundlespec object of (compression, version, parameters).
123 Returns a bundlespec object of (compression, version, parameters).
124 Compression will be ``None`` if not in strict mode and a compression isn't
124 Compression will be ``None`` if not in strict mode and a compression isn't
125 defined.
125 defined.
126
126
127 An ``InvalidBundleSpecification`` is raised when the specification is
127 An ``InvalidBundleSpecification`` is raised when the specification is
128 not syntactically well formed.
128 not syntactically well formed.
129
129
130 An ``UnsupportedBundleSpecification`` is raised when the compression or
130 An ``UnsupportedBundleSpecification`` is raised when the compression or
131 bundle type/version is not recognized.
131 bundle type/version is not recognized.
132
132
133 Note: this function will likely eventually return a more complex data
133 Note: this function will likely eventually return a more complex data
134 structure, including bundle2 part information.
134 structure, including bundle2 part information.
135 """
135 """
136
136
137 def parseparams(s):
137 def parseparams(s):
138 if b';' not in s:
138 if b';' not in s:
139 return s, {}
139 return s, {}
140
140
141 params = {}
141 params = {}
142 version, paramstr = s.split(b';', 1)
142 version, paramstr = s.split(b';', 1)
143
143
144 for p in paramstr.split(b';'):
144 for p in paramstr.split(b';'):
145 if b'=' not in p:
145 if b'=' not in p:
146 raise error.InvalidBundleSpecification(
146 raise error.InvalidBundleSpecification(
147 _(
147 _(
148 b'invalid bundle specification: '
148 b'invalid bundle specification: '
149 b'missing "=" in parameter: %s'
149 b'missing "=" in parameter: %s'
150 )
150 )
151 % p
151 % p
152 )
152 )
153
153
154 key, value = p.split(b'=', 1)
154 key, value = p.split(b'=', 1)
155 key = urlreq.unquote(key)
155 key = urlreq.unquote(key)
156 value = urlreq.unquote(value)
156 value = urlreq.unquote(value)
157 params[key] = value
157 params[key] = value
158
158
159 return version, params
159 return version, params
160
160
161 if strict and b'-' not in spec:
161 if strict and b'-' not in spec:
162 raise error.InvalidBundleSpecification(
162 raise error.InvalidBundleSpecification(
163 _(
163 _(
164 b'invalid bundle specification; '
164 b'invalid bundle specification; '
165 b'must be prefixed with compression: %s'
165 b'must be prefixed with compression: %s'
166 )
166 )
167 % spec
167 % spec
168 )
168 )
169
169
170 if b'-' in spec:
170 if b'-' in spec:
171 compression, version = spec.split(b'-', 1)
171 compression, version = spec.split(b'-', 1)
172
172
173 if compression not in util.compengines.supportedbundlenames:
173 if compression not in util.compengines.supportedbundlenames:
174 raise error.UnsupportedBundleSpecification(
174 raise error.UnsupportedBundleSpecification(
175 _(b'%s compression is not supported') % compression
175 _(b'%s compression is not supported') % compression
176 )
176 )
177
177
178 version, params = parseparams(version)
178 version, params = parseparams(version)
179
179
180 if version not in _bundlespeccgversions:
180 if version not in _bundlespeccgversions:
181 raise error.UnsupportedBundleSpecification(
181 raise error.UnsupportedBundleSpecification(
182 _(b'%s is not a recognized bundle version') % version
182 _(b'%s is not a recognized bundle version') % version
183 )
183 )
184 else:
184 else:
185 # Value could be just the compression or just the version, in which
185 # Value could be just the compression or just the version, in which
186 # case some defaults are assumed (but only when not in strict mode).
186 # case some defaults are assumed (but only when not in strict mode).
187 assert not strict
187 assert not strict
188
188
189 spec, params = parseparams(spec)
189 spec, params = parseparams(spec)
190
190
191 if spec in util.compengines.supportedbundlenames:
191 if spec in util.compengines.supportedbundlenames:
192 compression = spec
192 compression = spec
193 version = b'v1'
193 version = b'v1'
194 # Generaldelta repos require v2.
194 # Generaldelta repos require v2.
195 if b'generaldelta' in repo.requirements:
195 if b'generaldelta' in repo.requirements:
196 version = b'v2'
196 version = b'v2'
197 # Modern compression engines require v2.
197 # Modern compression engines require v2.
198 if compression not in _bundlespecv1compengines:
198 if compression not in _bundlespecv1compengines:
199 version = b'v2'
199 version = b'v2'
200 elif spec in _bundlespeccgversions:
200 elif spec in _bundlespeccgversions:
201 if spec == b'packed1':
201 if spec == b'packed1':
202 compression = b'none'
202 compression = b'none'
203 else:
203 else:
204 compression = b'bzip2'
204 compression = b'bzip2'
205 version = spec
205 version = spec
206 else:
206 else:
207 raise error.UnsupportedBundleSpecification(
207 raise error.UnsupportedBundleSpecification(
208 _(b'%s is not a recognized bundle specification') % spec
208 _(b'%s is not a recognized bundle specification') % spec
209 )
209 )
210
210
211 # Bundle version 1 only supports a known set of compression engines.
211 # Bundle version 1 only supports a known set of compression engines.
212 if version == b'v1' and compression not in _bundlespecv1compengines:
212 if version == b'v1' and compression not in _bundlespecv1compengines:
213 raise error.UnsupportedBundleSpecification(
213 raise error.UnsupportedBundleSpecification(
214 _(b'compression engine %s is not supported on v1 bundles')
214 _(b'compression engine %s is not supported on v1 bundles')
215 % compression
215 % compression
216 )
216 )
217
217
218 # The specification for packed1 can optionally declare the data formats
218 # The specification for packed1 can optionally declare the data formats
219 # required to apply it. If we see this metadata, compare against what the
219 # required to apply it. If we see this metadata, compare against what the
220 # repo supports and error if the bundle isn't compatible.
220 # repo supports and error if the bundle isn't compatible.
221 if version == b'packed1' and b'requirements' in params:
221 if version == b'packed1' and b'requirements' in params:
222 requirements = set(params[b'requirements'].split(b','))
222 requirements = set(params[b'requirements'].split(b','))
223 missingreqs = requirements - repo.supportedformats
223 missingreqs = requirements - repo.supportedformats
224 if missingreqs:
224 if missingreqs:
225 raise error.UnsupportedBundleSpecification(
225 raise error.UnsupportedBundleSpecification(
226 _(b'missing support for repository features: %s')
226 _(b'missing support for repository features: %s')
227 % b', '.join(sorted(missingreqs))
227 % b', '.join(sorted(missingreqs))
228 )
228 )
229
229
230 # Compute contentopts based on the version
230 # Compute contentopts based on the version
231 contentopts = _bundlespeccontentopts.get(version, {}).copy()
231 contentopts = _bundlespeccontentopts.get(version, {}).copy()
232
232
233 # Process the variants
233 # Process the variants
234 if b"stream" in params and params[b"stream"] == b"v2":
234 if b"stream" in params and params[b"stream"] == b"v2":
235 variant = _bundlespecvariants[b"streamv2"]
235 variant = _bundlespecvariants[b"streamv2"]
236 contentopts.update(variant)
236 contentopts.update(variant)
237
237
238 engine = util.compengines.forbundlename(compression)
238 engine = util.compengines.forbundlename(compression)
239 compression, wirecompression = engine.bundletype()
239 compression, wirecompression = engine.bundletype()
240 wireversion = _bundlespeccgversions[version]
240 wireversion = _bundlespeccgversions[version]
241
241
242 return bundlespec(
242 return bundlespec(
243 compression, wirecompression, version, wireversion, params, contentopts
243 compression, wirecompression, version, wireversion, params, contentopts
244 )
244 )
245
245
246
246
247 def readbundle(ui, fh, fname, vfs=None):
247 def readbundle(ui, fh, fname, vfs=None):
248 header = changegroup.readexactly(fh, 4)
248 header = changegroup.readexactly(fh, 4)
249
249
250 alg = None
250 alg = None
251 if not fname:
251 if not fname:
252 fname = b"stream"
252 fname = b"stream"
253 if not header.startswith(b'HG') and header.startswith(b'\0'):
253 if not header.startswith(b'HG') and header.startswith(b'\0'):
254 fh = changegroup.headerlessfixup(fh, header)
254 fh = changegroup.headerlessfixup(fh, header)
255 header = b"HG10"
255 header = b"HG10"
256 alg = b'UN'
256 alg = b'UN'
257 elif vfs:
257 elif vfs:
258 fname = vfs.join(fname)
258 fname = vfs.join(fname)
259
259
260 magic, version = header[0:2], header[2:4]
260 magic, version = header[0:2], header[2:4]
261
261
262 if magic != b'HG':
262 if magic != b'HG':
263 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
263 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
264 if version == b'10':
264 if version == b'10':
265 if alg is None:
265 if alg is None:
266 alg = changegroup.readexactly(fh, 2)
266 alg = changegroup.readexactly(fh, 2)
267 return changegroup.cg1unpacker(fh, alg)
267 return changegroup.cg1unpacker(fh, alg)
268 elif version.startswith(b'2'):
268 elif version.startswith(b'2'):
269 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
269 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
270 elif version == b'S1':
270 elif version == b'S1':
271 return streamclone.streamcloneapplier(fh)
271 return streamclone.streamcloneapplier(fh)
272 else:
272 else:
273 raise error.Abort(
273 raise error.Abort(
274 _(b'%s: unknown bundle version %s') % (fname, version)
274 _(b'%s: unknown bundle version %s') % (fname, version)
275 )
275 )
276
276
277
277
278 def getbundlespec(ui, fh):
278 def getbundlespec(ui, fh):
279 """Infer the bundlespec from a bundle file handle.
279 """Infer the bundlespec from a bundle file handle.
280
280
281 The input file handle is seeked and the original seek position is not
281 The input file handle is seeked and the original seek position is not
282 restored.
282 restored.
283 """
283 """
284
284
285 def speccompression(alg):
285 def speccompression(alg):
286 try:
286 try:
287 return util.compengines.forbundletype(alg).bundletype()[0]
287 return util.compengines.forbundletype(alg).bundletype()[0]
288 except KeyError:
288 except KeyError:
289 return None
289 return None
290
290
291 b = readbundle(ui, fh, None)
291 b = readbundle(ui, fh, None)
292 if isinstance(b, changegroup.cg1unpacker):
292 if isinstance(b, changegroup.cg1unpacker):
293 alg = b._type
293 alg = b._type
294 if alg == b'_truncatedBZ':
294 if alg == b'_truncatedBZ':
295 alg = b'BZ'
295 alg = b'BZ'
296 comp = speccompression(alg)
296 comp = speccompression(alg)
297 if not comp:
297 if not comp:
298 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
298 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
299 return b'%s-v1' % comp
299 return b'%s-v1' % comp
300 elif isinstance(b, bundle2.unbundle20):
300 elif isinstance(b, bundle2.unbundle20):
301 if b'Compression' in b.params:
301 if b'Compression' in b.params:
302 comp = speccompression(b.params[b'Compression'])
302 comp = speccompression(b.params[b'Compression'])
303 if not comp:
303 if not comp:
304 raise error.Abort(
304 raise error.Abort(
305 _(b'unknown compression algorithm: %s') % comp
305 _(b'unknown compression algorithm: %s') % comp
306 )
306 )
307 else:
307 else:
308 comp = b'none'
308 comp = b'none'
309
309
310 version = None
310 version = None
311 for part in b.iterparts():
311 for part in b.iterparts():
312 if part.type == b'changegroup':
312 if part.type == b'changegroup':
313 version = part.params[b'version']
313 version = part.params[b'version']
314 if version in (b'01', b'02'):
314 if version in (b'01', b'02'):
315 version = b'v2'
315 version = b'v2'
316 else:
316 else:
317 raise error.Abort(
317 raise error.Abort(
318 _(
318 _(
319 b'changegroup version %s does not have '
319 b'changegroup version %s does not have '
320 b'a known bundlespec'
320 b'a known bundlespec'
321 )
321 )
322 % version,
322 % version,
323 hint=_(b'try upgrading your Mercurial client'),
323 hint=_(b'try upgrading your Mercurial client'),
324 )
324 )
325 elif part.type == b'stream2' and version is None:
325 elif part.type == b'stream2' and version is None:
326 # A stream2 part requires to be part of a v2 bundle
326 # A stream2 part requires to be part of a v2 bundle
327 requirements = urlreq.unquote(part.params[b'requirements'])
327 requirements = urlreq.unquote(part.params[b'requirements'])
328 splitted = requirements.split()
328 splitted = requirements.split()
329 params = bundle2._formatrequirementsparams(splitted)
329 params = bundle2._formatrequirementsparams(splitted)
330 return b'none-v2;stream=v2;%s' % params
330 return b'none-v2;stream=v2;%s' % params
331
331
332 if not version:
332 if not version:
333 raise error.Abort(
333 raise error.Abort(
334 _(b'could not identify changegroup version in bundle')
334 _(b'could not identify changegroup version in bundle')
335 )
335 )
336
336
337 return b'%s-%s' % (comp, version)
337 return b'%s-%s' % (comp, version)
338 elif isinstance(b, streamclone.streamcloneapplier):
338 elif isinstance(b, streamclone.streamcloneapplier):
339 requirements = streamclone.readbundle1header(fh)[2]
339 requirements = streamclone.readbundle1header(fh)[2]
340 formatted = bundle2._formatrequirementsparams(requirements)
340 formatted = bundle2._formatrequirementsparams(requirements)
341 return b'none-packed1;%s' % formatted
341 return b'none-packed1;%s' % formatted
342 else:
342 else:
343 raise error.Abort(_(b'unknown bundle type: %s') % b)
343 raise error.Abort(_(b'unknown bundle type: %s') % b)
344
344
345
345
346 def _computeoutgoing(repo, heads, common):
346 def _computeoutgoing(repo, heads, common):
347 """Computes which revs are outgoing given a set of common
347 """Computes which revs are outgoing given a set of common
348 and a set of heads.
348 and a set of heads.
349
349
350 This is a separate function so extensions can have access to
350 This is a separate function so extensions can have access to
351 the logic.
351 the logic.
352
352
353 Returns a discovery.outgoing object.
353 Returns a discovery.outgoing object.
354 """
354 """
355 cl = repo.changelog
355 cl = repo.changelog
356 if common:
356 if common:
357 hasnode = cl.hasnode
357 hasnode = cl.hasnode
358 common = [n for n in common if hasnode(n)]
358 common = [n for n in common if hasnode(n)]
359 else:
359 else:
360 common = [nullid]
360 common = [nullid]
361 if not heads:
361 if not heads:
362 heads = cl.heads()
362 heads = cl.heads()
363 return discovery.outgoing(repo, common, heads)
363 return discovery.outgoing(repo, common, heads)
364
364
365
365
366 def _checkpublish(pushop):
366 def _checkpublish(pushop):
367 repo = pushop.repo
367 repo = pushop.repo
368 ui = repo.ui
368 ui = repo.ui
369 behavior = ui.config(b'experimental', b'auto-publish')
369 behavior = ui.config(b'experimental', b'auto-publish')
370 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
370 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
371 return
371 return
372 remotephases = listkeys(pushop.remote, b'phases')
372 remotephases = listkeys(pushop.remote, b'phases')
373 if not remotephases.get(b'publishing', False):
373 if not remotephases.get(b'publishing', False):
374 return
374 return
375
375
376 if pushop.revs is None:
376 if pushop.revs is None:
377 published = repo.filtered(b'served').revs(b'not public()')
377 published = repo.filtered(b'served').revs(b'not public()')
378 else:
378 else:
379 published = repo.revs(b'::%ln - public()', pushop.revs)
379 published = repo.revs(b'::%ln - public()', pushop.revs)
380 if published:
380 if published:
381 if behavior == b'warn':
381 if behavior == b'warn':
382 ui.warn(
382 ui.warn(
383 _(b'%i changesets about to be published\n') % len(published)
383 _(b'%i changesets about to be published\n') % len(published)
384 )
384 )
385 elif behavior == b'confirm':
385 elif behavior == b'confirm':
386 if ui.promptchoice(
386 if ui.promptchoice(
387 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
387 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
388 % len(published)
388 % len(published)
389 ):
389 ):
390 raise error.Abort(_(b'user quit'))
390 raise error.Abort(_(b'user quit'))
391 elif behavior == b'abort':
391 elif behavior == b'abort':
392 msg = _(b'push would publish %i changesets') % len(published)
392 msg = _(b'push would publish %i changesets') % len(published)
393 hint = _(
393 hint = _(
394 b"use --publish or adjust 'experimental.auto-publish'"
394 b"use --publish or adjust 'experimental.auto-publish'"
395 b" config"
395 b" config"
396 )
396 )
397 raise error.Abort(msg, hint=hint)
397 raise error.Abort(msg, hint=hint)
398
398
399
399
400 def _forcebundle1(op):
400 def _forcebundle1(op):
401 """return true if a pull/push must use bundle1
401 """return true if a pull/push must use bundle1
402
402
403 This function is used to allow testing of the older bundle version"""
403 This function is used to allow testing of the older bundle version"""
404 ui = op.repo.ui
404 ui = op.repo.ui
405 # The goal is this config is to allow developer to choose the bundle
405 # The goal is this config is to allow developer to choose the bundle
406 # version used during exchanged. This is especially handy during test.
406 # version used during exchanged. This is especially handy during test.
407 # Value is a list of bundle version to be picked from, highest version
407 # Value is a list of bundle version to be picked from, highest version
408 # should be used.
408 # should be used.
409 #
409 #
410 # developer config: devel.legacy.exchange
410 # developer config: devel.legacy.exchange
411 exchange = ui.configlist(b'devel', b'legacy.exchange')
411 exchange = ui.configlist(b'devel', b'legacy.exchange')
412 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
412 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
413 return forcebundle1 or not op.remote.capable(b'bundle2')
413 return forcebundle1 or not op.remote.capable(b'bundle2')
414
414
415
415
416 class pushoperation(object):
416 class pushoperation(object):
417 """A object that represent a single push operation
417 """A object that represent a single push operation
418
418
419 Its purpose is to carry push related state and very common operations.
419 Its purpose is to carry push related state and very common operations.
420
420
421 A new pushoperation should be created at the beginning of each push and
421 A new pushoperation should be created at the beginning of each push and
422 discarded afterward.
422 discarded afterward.
423 """
423 """
424
424
425 def __init__(
425 def __init__(
426 self,
426 self,
427 repo,
427 repo,
428 remote,
428 remote,
429 force=False,
429 force=False,
430 revs=None,
430 revs=None,
431 newbranch=False,
431 newbranch=False,
432 bookmarks=(),
432 bookmarks=(),
433 publish=False,
433 publish=False,
434 pushvars=None,
434 pushvars=None,
435 ):
435 ):
436 # repo we push from
436 # repo we push from
437 self.repo = repo
437 self.repo = repo
438 self.ui = repo.ui
438 self.ui = repo.ui
439 # repo we push to
439 # repo we push to
440 self.remote = remote
440 self.remote = remote
441 # force option provided
441 # force option provided
442 self.force = force
442 self.force = force
443 # revs to be pushed (None is "all")
443 # revs to be pushed (None is "all")
444 self.revs = revs
444 self.revs = revs
445 # bookmark explicitly pushed
445 # bookmark explicitly pushed
446 self.bookmarks = bookmarks
446 self.bookmarks = bookmarks
447 # allow push of new branch
447 # allow push of new branch
448 self.newbranch = newbranch
448 self.newbranch = newbranch
449 # step already performed
449 # step already performed
450 # (used to check what steps have been already performed through bundle2)
450 # (used to check what steps have been already performed through bundle2)
451 self.stepsdone = set()
451 self.stepsdone = set()
452 # Integer version of the changegroup push result
452 # Integer version of the changegroup push result
453 # - None means nothing to push
453 # - None means nothing to push
454 # - 0 means HTTP error
454 # - 0 means HTTP error
455 # - 1 means we pushed and remote head count is unchanged *or*
455 # - 1 means we pushed and remote head count is unchanged *or*
456 # we have outgoing changesets but refused to push
456 # we have outgoing changesets but refused to push
457 # - other values as described by addchangegroup()
457 # - other values as described by addchangegroup()
458 self.cgresult = None
458 self.cgresult = None
459 # Boolean value for the bookmark push
459 # Boolean value for the bookmark push
460 self.bkresult = None
460 self.bkresult = None
461 # discover.outgoing object (contains common and outgoing data)
461 # discover.outgoing object (contains common and outgoing data)
462 self.outgoing = None
462 self.outgoing = None
463 # all remote topological heads before the push
463 # all remote topological heads before the push
464 self.remoteheads = None
464 self.remoteheads = None
465 # Details of the remote branch pre and post push
465 # Details of the remote branch pre and post push
466 #
466 #
467 # mapping: {'branch': ([remoteheads],
467 # mapping: {'branch': ([remoteheads],
468 # [newheads],
468 # [newheads],
469 # [unsyncedheads],
469 # [unsyncedheads],
470 # [discardedheads])}
470 # [discardedheads])}
471 # - branch: the branch name
471 # - branch: the branch name
472 # - remoteheads: the list of remote heads known locally
472 # - remoteheads: the list of remote heads known locally
473 # None if the branch is new
473 # None if the branch is new
474 # - newheads: the new remote heads (known locally) with outgoing pushed
474 # - newheads: the new remote heads (known locally) with outgoing pushed
475 # - unsyncedheads: the list of remote heads unknown locally.
475 # - unsyncedheads: the list of remote heads unknown locally.
476 # - discardedheads: the list of remote heads made obsolete by the push
476 # - discardedheads: the list of remote heads made obsolete by the push
477 self.pushbranchmap = None
477 self.pushbranchmap = None
478 # testable as a boolean indicating if any nodes are missing locally.
478 # testable as a boolean indicating if any nodes are missing locally.
479 self.incoming = None
479 self.incoming = None
480 # summary of the remote phase situation
480 # summary of the remote phase situation
481 self.remotephases = None
481 self.remotephases = None
482 # phases changes that must be pushed along side the changesets
482 # phases changes that must be pushed along side the changesets
483 self.outdatedphases = None
483 self.outdatedphases = None
484 # phases changes that must be pushed if changeset push fails
484 # phases changes that must be pushed if changeset push fails
485 self.fallbackoutdatedphases = None
485 self.fallbackoutdatedphases = None
486 # outgoing obsmarkers
486 # outgoing obsmarkers
487 self.outobsmarkers = set()
487 self.outobsmarkers = set()
488 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
488 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
489 self.outbookmarks = []
489 self.outbookmarks = []
490 # transaction manager
490 # transaction manager
491 self.trmanager = None
491 self.trmanager = None
492 # map { pushkey partid -> callback handling failure}
492 # map { pushkey partid -> callback handling failure}
493 # used to handle exception from mandatory pushkey part failure
493 # used to handle exception from mandatory pushkey part failure
494 self.pkfailcb = {}
494 self.pkfailcb = {}
495 # an iterable of pushvars or None
495 # an iterable of pushvars or None
496 self.pushvars = pushvars
496 self.pushvars = pushvars
497 # publish pushed changesets
497 # publish pushed changesets
498 self.publish = publish
498 self.publish = publish
499
499
500 @util.propertycache
500 @util.propertycache
501 def futureheads(self):
501 def futureheads(self):
502 """future remote heads if the changeset push succeeds"""
502 """future remote heads if the changeset push succeeds"""
503 return self.outgoing.missingheads
503 return self.outgoing.missingheads
504
504
    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails

        Returns a list of nodes: either the pre-push common heads (when no
        specific revs were targeted), or the heads of the part of history
        the remote is known to already have.
        """
        if self.revs is None:
            # no revs were targeted for push, so all common heads stay
            # relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        # pushed revs whose revision number is already known to the remote
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads
538
538
539 @property
539 @property
540 def commonheads(self):
540 def commonheads(self):
541 """set of all common heads after changeset bundle push"""
541 """set of all common heads after changeset bundle push"""
542 if self.cgresult:
542 if self.cgresult:
543 return self.futureheads
543 return self.futureheads
544 else:
544 else:
545 return self.fallbackheads
545 return self.fallbackheads
546
546
547
547
# mapping of messages used when pushing a bookmark
# each value is a (success message, failure message) pair; both templates
# take the bookmark name as their single %s argument
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}
563
563
564
564
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    # gather all push state into one operation object; extensions can add
    # extra attributes through ``opargs``
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        # local (filesystem) peer: we can check repo requirements directly
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    # any of wlock/lock/trmanager may be None here (lock unavailable or
    # bundle1 path); nullcontextmanager keeps the nesting uniform
    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                # the _push* helpers below check pushop.stepsdone / results
                # internally, so they are no-ops when bundle2 already
                # handled the corresponding step
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
660
660
661
661
# list of step names, in the order discovery must run before a push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
669
669
670
670
def pushdiscovery(stepname):
    """Decorator registering a pre-push discovery step.

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and ``stepname`` is appended to the ordered
    step list, so decoration order matters.

    Only use this for brand new steps; to wrap a step added elsewhere,
    mutate the pushdiscovery dictionary directly.
    """

    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return register
688
688
689
689
def _pushdiscovery(pushop):
    """Invoke every registered discovery step, in registration order."""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
695
695
696
696
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """Discover the changesets that need to be pushed.

    Fills in ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming``.
    """
    if pushop.revs:
        # limit the incoming discovery to ancestors of the targeted revs
        commoninc = discovery.findcommonincoming(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = discovery.findcommonincoming(
            pushop.repo, pushop.remote, force=pushop.force
        )
    common, inc, remoteheads = commoninc
    pushop.outgoing = discovery.findcommonoutgoing(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
722
722
723
723
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)

    Fills in ``pushop.outdatedphases`` (phase roots to advance if the
    changeset push succeeds) and ``pushop.fallbackoutdatedphases`` (roots
    to advance if it fails).
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    # on a non-publishing remote, only already-public changesets can have
    # their remote phase outdated
    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        # explicit --publish push: everything pushed (or already remote
        # draft) becomes public
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        # nothing new to push: success and failure cases coincide
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
785
785
786
786
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """Collect the obsolescence markers relevant to the pushed changesets.

    Fills in ``pushop.outobsmarkers`` when marker exchange is enabled,
    the local obsstore is non-empty, and the remote advertises the
    'obsolete' pushkey namespace.
    """
    repo = pushop.repo
    if not obsolete.isenabled(repo, obsolete.exchangeopt):
        return

    if not repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
803
803
804
804
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and schedule the needed updates."""
    repo = pushop.repo.unfiltered()
    pushop.ui.debug(b"checking for updated bookmarks\n")
    if pushop.revs:
        # only consider bookmarks on ancestors of the pushed revs
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    else:
        ancestors = ()

    remotebookmark = bookmod.unhexlifybookmarks(
        listkeys(pushop.remote, b'bookmarks')
    )

    # bookmarks the user explicitly requested on the command line
    explicit = {
        repo._bookmarks.expandname(mark) for mark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
824
824
825
825
826 def _processcompared(pushop, pushed, explicit, remotebms, comp):
826 def _processcompared(pushop, pushed, explicit, remotebms, comp):
827 """take decision on bookmarks to push to the remote repo
827 """take decision on bookmarks to push to the remote repo
828
828
829 Exists to help extensions alter this behavior.
829 Exists to help extensions alter this behavior.
830 """
830 """
831 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
831 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
832
832
833 repo = pushop.repo
833 repo = pushop.repo
834
834
835 for b, scid, dcid in advsrc:
835 for b, scid, dcid in advsrc:
836 if b in explicit:
836 if b in explicit:
837 explicit.remove(b)
837 explicit.remove(b)
838 if not pushed or repo[scid].rev() in pushed:
838 if not pushed or repo[scid].rev() in pushed:
839 pushop.outbookmarks.append((b, dcid, scid))
839 pushop.outbookmarks.append((b, dcid, scid))
840 # search added bookmark
840 # search added bookmark
841 for b, scid, dcid in addsrc:
841 for b, scid, dcid in addsrc:
842 if b in explicit:
842 if b in explicit:
843 explicit.remove(b)
843 explicit.remove(b)
844 pushop.outbookmarks.append((b, b'', scid))
844 pushop.outbookmarks.append((b, b'', scid))
845 # search for overwritten bookmark
845 # search for overwritten bookmark
846 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
846 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
847 if b in explicit:
847 if b in explicit:
848 explicit.remove(b)
848 explicit.remove(b)
849 pushop.outbookmarks.append((b, dcid, scid))
849 pushop.outbookmarks.append((b, dcid, scid))
850 # search for bookmark to delete
850 # search for bookmark to delete
851 for b, scid, dcid in adddst:
851 for b, scid, dcid in adddst:
852 if b in explicit:
852 if b in explicit:
853 explicit.remove(b)
853 explicit.remove(b)
854 # treat as "deleted locally"
854 # treat as "deleted locally"
855 pushop.outbookmarks.append((b, dcid, b''))
855 pushop.outbookmarks.append((b, dcid, b''))
856 # identical bookmarks shouldn't get reported
856 # identical bookmarks shouldn't get reported
857 for b, scid, dcid in same:
857 for b, scid, dcid in same:
858 if b in explicit:
858 if b in explicit:
859 explicit.remove(b)
859 explicit.remove(b)
860
860
861 if explicit:
861 if explicit:
862 explicit = sorted(explicit)
862 explicit = sorted(explicit)
863 # we should probably list all of them
863 # we should probably list all of them
864 pushop.ui.warn(
864 pushop.ui.warn(
865 _(
865 _(
866 b'bookmark %s does not exist on the local '
866 b'bookmark %s does not exist on the local '
867 b'or remote repository!\n'
867 b'or remote repository!\n'
868 )
868 )
869 % explicit[0]
869 % explicit[0]
870 )
870 )
871 pushop.bkresult = 2
871 pushop.bkresult = 2
872
872
873 pushop.outbookmarks.sort()
873 pushop.outbookmarks.sort()
874
874
875
875
def _pushcheckoutgoing(pushop):
    """Validate the outgoing set before pushing.

    Returns False when there is nothing to push. Aborts when a non-forced
    push would publish obsolete or unstable changesets, or when head
    checking fails.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if pushop.force:
        return True
    # if repo.obsstore == False --> no obsolete
    # then, save the iteration
    if unfi.obsstore:
        # this message are here for 80 char limit reason
        mso = _(b"push includes obsolete changeset: %s!")
        mspd = _(b"push includes phase-divergent changeset: %s!")
        mscd = _(b"push includes content-divergent changeset: %s!")
        mst = {
            b"orphan": _(b"push includes orphan changeset: %s!"),
            b"phase-divergent": mspd,
            b"content-divergent": mscd,
        }
        # If we are to push if there is at least one
        # obsolete or unstable changeset in missing, at
        # least one of the missinghead will be obsolete or
        # unstable. So checking heads only is ok
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(mso % ctx)
            elif ctx.isunstable():
                # TODO print more than one instability in the abort
                # message
                raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
912
912
913
913
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
921
921
922
922
def b2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for push.

    The decorated function is recorded under ``stepname`` in the
    step -> function mapping and ``stepname`` is inserted into the ordered
    step list (at position ``idx`` when given, appended otherwise), so
    decoration order matters.

    Only use this for brand new steps; to wrap a step defined elsewhere,
    mutate the b2partsgenmapping dictionary directly.
    """

    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return register
943
943
944
944
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        # 'related' means the server can compare heads that are related to
        # (ancestors/descendants of) the submitted ones, rather than
        # requiring an exact head match
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # old-style check: the remote heads must be exactly what we saw
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            # newer check: only heads our push actually affects (replaces
            # or removes) must still be present on the remote
            affected = set()
            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)
970
970
971
971
972 def _pushing(pushop):
972 def _pushing(pushop):
973 """return True if we are pushing anything"""
973 """return True if we are pushing anything"""
974 return bool(
974 return bool(
975 pushop.outgoing.missing
975 pushop.outgoing.missing
976 or pushop.outdatedphases
976 or pushop.outdatedphases
977 or pushop.outobsmarkers
977 or pushop.outobsmarkers
978 or pushop.outbookmarks
978 or pushop.outbookmarks
979 )
979 )
980
980
981
981
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """Add a part verifying the remote bookmarks did not move under us.

    Skipped for forced pushes, empty pushes, when there are no outgoing
    bookmarks, or when the remote lacks the 'bookmarks' bundle2 capability.
    """
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if b'bookmarks' not in b2caps or not pushop.outbookmarks:
        return
    # record the remote value we expect for each bookmark we touch
    data = [(book, old) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
996
996
997
997
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """Add a part verifying the remote phases did not move under us.

    Skipped for forced pushes, empty pushes, when no remote phase summary
    was computed, or when the remote lacks phase-heads support.
    """
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is None or not hasphaseheads:
        return
    # check that the remote phase has not changed
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        checkdata = phases.binaryencode(checks)
        bundler.newpart(b'check:phases', data=checkdata)
1015
1015
1016
1016
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.

    Returns a reply handler that extracts that result from the server's
    reply bundle, or None when there is nothing to push.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # pick the highest changegroup version both sides support
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
1063
1063
1064
1064
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)

    # the devel knob forces the old pushkey-based exchange even when the
    # remote understands the binary phase-heads part
    forcelegacy = b'phases' in pushop.repo.ui.configlist(
        b'devel', b'legacy.exchange'
    )
    binarysupported = b'heads' in caps.get(b'phases', ())

    if binarysupported and not forcelegacy:
        return _pushb2phaseheads(pushop, bundler)
    if b'pushkey' in caps:
        return _pushb2phasespushkey(pushop, bundler)
1081
1081
1082
1082
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    outdated = pushop.outdatedphases
    if not outdated:
        return
    # one bucket per phase; only the public bucket (index 0) is filled,
    # matching what the original pushkey path announces
    updates = [[] for _unused in phases.allphases]
    updates[0].extend(head.node() for head in outdated)
    bundler.newpart(b'phase-heads', data=phases.binaryencode(updates))
1091
1091
1092
1092
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    # (part id, node) pairs so replies/failures can be matched back to
    # the head whose phase we tried to change
    part2node = []

    def handlefailure(pushop, exc):
        # registered in pushop.pkfailcb; called when the server reports a
        # pushkey part failure for one of the ids we emitted
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    # one pushkey part per outdated remote head, asking the server to move
    # it from draft to public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # inspect the server reply for every part we generated and warn
        # about ignored or failed updates
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1128
1128
1129
1129
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """add an obsolescence-markers part to the bundle2 push (if possible)"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        # no obsmarker format shared with the remote: leave the step to
        # another mechanism
        return
    pushop.stepsdone.add(b'obsmarkers')
    if not pushop.outobsmarkers:
        return
    ordered = obsutil.sortedmarkers(pushop.outobsmarkers)
    bundle2.buildobsmarkerspart(bundler, ordered)
1141
1141
1142
1142
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    caps = bundle2.bundle2caps(pushop.remote)

    # the devel knob forces the pushkey-based exchange even when the
    # remote supports the binary bookmarks part
    forcelegacy = b'bookmarks' in pushop.repo.ui.configlist(
        b'devel', b'legacy.exchange'
    )

    if not forcelegacy and b'bookmarks' in caps:
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1157
1157
1158
1158
1159 def _bmaction(old, new):
1159 def _bmaction(old, new):
1160 """small utility for bookmark pushing"""
1160 """small utility for bookmark pushing"""
1161 if not old:
1161 if not old:
1162 return b'export'
1162 return b'export'
1163 elif not new:
1163 elif not new:
1164 return b'delete'
1164 return b'delete'
1165 return b'update'
1165 return b'update'
1166
1166
1167
1167
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if not node:
        return
    if pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )
1174
1174
1175
1175
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmarks through a bundle2 binary ``bookmarks`` part"""
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    # allactions records (bookmark, action) so the reply handler can print
    # the right message; data carries the (bookmark, node) payload
    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        # NOTE(review): the binary part is mandatory, so reaching this
        # handler means the server processed every update — presumably a
        # failure would have aborted the whole push; verify server side.
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1197
1197
1198
1198
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmarks through bundle2, one ``pushkey`` part per bookmark"""
    pushop.stepsdone.add(b'bookmarks')
    # (part id, bookmark, action) triples to match replies back to books
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # registered in pushop.pkfailcb; turns a server-side pushkey part
        # failure into an abort naming the affected bookmark
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    # flag the overall bookmark push as failed (non-zero
                    # bkresult) unless discovery already invalidated it
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1

    return handlereply
1245
1245
1246
1246
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    # parse every "KEY=VALUE" (or bare "KEY=") entry before emitting the
    # part, so a malformed entry aborts without sending anything
    shellvars = {}
    for raw in pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        name, value = raw.split(b'=', 1)
        shellvars[name] = value

    part = bundler.newpart(b'pushvars')
    for name, value in pycompat.iteritems(shellvars):
        part.addparam(name, value, mandatory=False)
1267
1267
1268
1268
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: allow the server to send a bundle back (requires an open
    # transaction manager and the experimental config knob)
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator in order; each may add parts to
    # the bundle and return a callable to process the server's reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        # process the reply bundle the server sent back
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        # dispatch the failure to whichever part generator registered a
        # callback for this part id (phases/bookmarks pushkey parts)
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1326
1326
1327
1327
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    This is the non-bundle2 path: it builds a version '01' changegroup and
    sends it through the remote's ``unbundle`` command directly.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1374
1374
1375
1375
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            # publishing server: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
1440
1440
1441
1441
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        # a transaction is available: actually advance the boundary
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    repo = pushop.repo
    skipped = [n for n in nodes if phase < repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if skipped:
        pushop.ui.status(
            _(
                b'cannot lock source repo, skipping '
                b'local %s phase update\n'
            )
            % phasestr
        )
1462
1462
1463
1463
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug(b'try to push obsolete markers to remote\n')
    markers = obsutil.sortedmarkers(pushop.outobsmarkers)
    remotedata = obsolete._pushkeyescape(markers)
    results = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        results.append(remote.pushkey(b'obsolete', key, b'', remotedata[key]))
    if not all(results):
        msg = _(b'failed to push some obsolete markers!\n')
        repo.ui.warn(msg)
1483
1483
1484
1484
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Non-bundle2 fallback: one ``pushkey`` wire command per outgoing
    bookmark. Skipped entirely when the changegroup push failed
    (``cgresult == 0``) or when bundle2 already handled bookmarks.
    """
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        # classify the change so the right status/warning message is used
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1518
1518
1519
1519
class pulloperation(object):
    """Carrier object for the state of a single pull operation.

    Create a fresh instance at the beginning of every pull and discard
    it afterward; it holds pull-related state plus a few very common
    helper operations.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # destination repository (the one we pull into)
        self.repo = repo
        # source peer (the one we pull from)
        self.remote = remote
        # revisions requested; None means "everything"
        self.heads = heads
        # bookmarks explicitly requested, with shorthand names expanded
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # whether this is a forced pull
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (created lazily by the caller)
        self.trmanager = None
        # changesets common to both sides before the pull
        self.common = None
        # set of heads actually pulled
        self.rheads = None
        # missing changesets that must be fetched remotely
        self.fetch = None
        # bookmark data received from the remote
        self.remotebookmarks = remotebookmarks
        # changegroup result, used as the return code of pull
        self.cgresult = None
        # names of steps already performed
        self.stepsdone = set()
        # whether a clone from pre-generated bundles was attempted
        self.clonebundleattempted = False
        # file patterns to include in storage
        self.includepats = includepats
        # file patterns to exclude from storage
        self.excludepats = excludepats
        # number of ancestor changesets to pull per pulled head
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # a specific subset was requested; sync exactly on it
            return self.heads
        # everything possible was pulled: sync on common plus any
        # remote head not already part of common (order preserved)
        known = set(self.common)
        subset = list(self.common)
        subset.extend(n for n in self.rheads if n not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is usable unless something forces the bundle1 protocol
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1608
1608
1609
1609
class transactionmanager(util.transactional):
    """Manage the life cycle of a pull transaction.

    The underlying transaction is created on demand, and the appropriate
    hooks fire when it is closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if self._tr is None:
            # the password is stripped from the URL before it reaches hooks
            name = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(name)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
1640
1640
1641
1641
def listkeys(remote, namespace):
    """Return the pushkey listing for ``namespace`` on ``remote``."""
    with remote.commandexecutor() as executor:
        # resolve the future inside the executor context so the request
        # is issued and completed before the executor is torn down
        future = executor.callcommand(b'listkeys', {b'namespace': namespace})
        return future.result()
1645
1645
1646
1646
def _fullpullbundle2(repo, pullop):
    """Pull bundle2 data repeatedly until the remote has nothing left.

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset was added: nothing more to pull
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally
            break
        # fold the freshly-received heads into the common set and retry
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1687
1687
1688
1688
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main entry point for retrieving data from a remote
    repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # Narrow patterns may be passed in explicitly for API flexibility;
    # otherwise fall back to the patterns configured on the repository.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # refuse to pull from a local peer whose requirements we cannot honor
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        # bookmarks live in .hg/ here, so the working-copy lock is needed
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # Clone bundles must be applied before discovery to avoid
            # redundant work, so they cannot live in _pullbundle2().
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1791
1791
1792
1792
# Ordered list of discovery steps to perform before a pull.
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}
1800
1800
1801
1801
def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is recorded in the step -> function mapping
    and appended to the ordered list of steps. Beware that decorated
    functions are registered in definition order (this may matter).

    Only use this decorator for a brand new step; to wrap a step coming
    from an extension, modify the pulldiscovery dictionary directly."""

    def dec(func):
        # refuse to silently overwrite an already-registered step
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec
1819
1819
1820
1820
def _pulldiscovery(pullop):
    """Execute every registered discovery step, in registration order."""
    for name in pulldiscoveryorder:
        pulldiscoverymapping[name](pullop)
1826
1826
1827
1827
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    Without bundle2, bookmarks have to be fetched before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data was already provided by the caller
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1842
1842
1843
1843
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; eventually all discovery
    should happen here."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution catching most of the "common but
        # locally hidden" situations. Discovery is not run on the
        # unfiltered repository because that ends up doing a pathological
        # number of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able
        # to detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n) and n not in scommon:
                common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1875
1875
1876
1876
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # only send narrow patterns when the server advertises narrow support
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        # stream clones carry everything; skip changegroup and phases
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        # prefer binary phase parts unless the legacy exchange is forced
        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    try:
        op = bundle2.bundleoperation(
            pullop.repo, pullop.gettransaction, source=b'pull'
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
        raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        pullop.remotebookmarks = {
            record[b'bookmark']: record[b"node"]
            for record in op.records[b'bookmarks']
        }
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
2012
2012
2013
2013
2014 def _pullbundle2extraprepare(pullop, kwargs):
2014 def _pullbundle2extraprepare(pullop, kwargs):
2015 """hook function so that extensions can extend the getbundle call"""
2015 """hook function so that extensions can extend the getbundle call"""
2016
2016
2017
2017
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Selects the best wire-protocol command supported by the remote
    (``getbundle``, ``changegroup`` or ``changegroupsubset``), fetches the
    changegroup and applies it inside the pull transaction, storing the
    result in ``pullop.cgresult``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        # another pathway (e.g. bundle2) already transferred the changegroup
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        # nothing missing on our side: report and record a no-op result
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    # transaction must exist before the changegroup is applied below
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        # full clone: every ancestor of every remote head is requested
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # preferred path: modern remotes expose getbundle
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        # legacy remote, unrestricted pull: plain changegroup command
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        # partial pull requested but the legacy remote cannot honor it
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        # legacy remote with subset support: request only selected heads
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    # fold the per-part return codes into a single changegroup result
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2071
2071
2072
2072
def _pullphase(pullop):
    """Fetch phase data from the remote via listkeys and apply it locally."""
    if b'phases' not in pullop.stepsdone:
        phasedata = listkeys(pullop.remote, b'phases')
        _pullapplyphases(pullop, phasedata)
2079
2079
2080
2080
def _pullapplyphases(pullop, remotephases):
    """Apply phase movement deduced from the observed remote state.

    For a publishing (or phase-less legacy) remote, everything pulled is
    considered public. For a non-publishing remote, the remote's own
    phase boundaries are analyzed and honored.
    """
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing: trust its per-root phase data
        publicheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        draftheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        publicheads = pullop.pulledsubset
        draftheads = []

    unfi = pullop.repo.unfiltered()
    getphase = unfi._phasecache.phase
    getrev = unfi.changelog.index.get_rev

    def _advance(targetphase, candidates):
        # Drop nodes already at or below the target phase locally, then
        # move the boundary for the rest (opening the transaction lazily).
        nodes = [
            pn for pn in candidates if getphase(unfi, getrev(pn)) > targetphase
        ]
        if nodes:
            tr = pullop.gettransaction()
            phases.advanceboundary(pullop.repo, tr, targetphase, nodes)

    _advance(phases.public, publicheads)
    _advance(phases.draft, draftheads)
2115
2115
2116
2116
def _pullbookmarks(pullop):
    """Update the local bookmarks from the remote bookmark data on ``pullop``."""
    if b'bookmarks' not in pullop.stepsdone:
        pullop.stepsdone.add(b'bookmarks')
        localrepo = pullop.repo
        bookmod.updatefromremote(
            localrepo.ui,
            localrepo,
            pullop.remotebookmarks,
            pullop.remote.url(),
            pullop.gettransaction,
            explicit=pullop.explicitbookmarks,
        )
2132
2132
2133
2133
def _pullobsolete(pullop):
    """Fetch obsolescence markers from the remote when exchange is enabled.

    ``pullop.gettransaction`` creates the pull transaction on demand; the
    transaction is returned when one had to be created (i.e. when the
    remote advertised marker data) so the caller knows a new transaction
    now exists, and None otherwise.

    Exists mostly to allow overriding for experimentation purpose
    """
    if b'obsmarkers' in pullop.stepsdone:
        return None
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return tr
    pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
    remoteobs = listkeys(pullop.remote, b'obsolete')
    if b'dump0' in remoteobs:
        tr = pullop.gettransaction()
        markers = []
        # listkeys splits markers over numbered dumpN keys; decode each one
        for key in sorted(remoteobs, reverse=True):
            if not key.startswith(b'dump'):
                continue
            data = util.b85decode(remoteobs[key])
            version, newmarks = obsolete._readmarkers(data)
            markers.extend(newmarks)
        if markers:
            pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr
2161
2161
2162
2162
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())

    def _userpatterns(suffix):
        # Per-user config entries fall back to the ``default`` entries.
        return ui.configlist(
            _NARROWACL_SECTION,
            username + suffix,
            ui.configlist(_NARROWACL_SECTION, b'default' + suffix),
        )

    user_includes = _userpatterns(b'.includes')
    user_excludes = _userpatterns(b'.excludes')
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    def _aspathpattern(p):
        # ``*`` means "everything"; anything else becomes a path: pattern.
        return b'path:.' if p == b'*' else b'path:' + p

    user_includes = [_aspathpattern(p) for p in user_includes]
    user_excludes = [_aspathpattern(p) for p in user_excludes]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, invalid_includes)
        )

    new_args = dict(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2217
2217
2218
2218
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) from the nearest requested head
        revdepth = {h: 0 for h in headsrevs}

    # rev -> set of ellipsis heads reachable from it / rev -> its roots
    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` onto the intermediate ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # find an elided merge commit between two of head's three roots
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    # walk from heads toward roots (findmissingrevs yields ascending order)
    for rev in visit:
        # NOTE: rebinds ``clrev`` (previously cl.rev) to the revision object
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            # full node: it roots any ellipsis heads reaching it
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # required-but-irrelevant node becomes an ellipsis node
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # elided entirely: propagate reachable heads to parents
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2350
2350
2351
2351
def caps20to10(repo, role):
    """Return the capability set to request bundle20 during getbundle."""
    blob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(blob)}
2358
2358
2359
2359
# Ordered list of step names used when building a bundle2 for getbundle;
# the generation order of parts matters.
getbundle2partsorder = []

# Maps a step name to the function generating the corresponding part.
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}


def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the step -> function mapping and
    inserted into the ordered step list (at position ``idx`` when given,
    appended otherwise). Beware that decorated functions will be added in
    order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly.
    """

    def dec(func):
        # each step name may be registered only once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        position = len(getbundle2partsorder) if idx is None else idx
        getbundle2partsorder.insert(position, stepname)
        return func

    return dec
2389
2389
2390
2390
def bundle2requested(bundlecaps):
    """Tell whether the client requested a bundle2 stream (any ``HG2X`` cap)."""
    if bundlecaps is None:
        return False
    return any(cap[:3] == b'HG2' for cap in bundlecaps)
2395
2395
2396
2396
def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # wire protocol kwargs arrive with native-str keys; normalize to bytes
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            # bundle10 has no way to transfer anything but a changegroup
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            # any leftover argument is bundle2-only and cannot be honored
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    # decode the client's bundle2 capabilities out of the bundlecaps blob
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            # part generators take native-str keyword arguments
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2458
2458
2459
2459
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """Add a stream2 (stream clone) part to the bundle when applicable."""
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463
2463
2464
2464
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    # Nothing to do when the client opted out of changesets or sent no
    # bundle2 capabilities at all (b2caps may be None).
    if not kwargs.get('cg', True) or not b2caps:
        return

    cgversion = b'01'
    remoteversions = b2caps.get(b'changegroup')
    if remoteversions:  # 3.1 and 3.2 ship with an empty value
        remoteversions = [
            v
            for v in remoteversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not remoteversions:
            raise error.Abort(_(b'no common changegroup version'))
        cgversion = max(remoteversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    stream = changegroup.makestream(
        repo, outgoing, cgversion, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=stream)
    if remoteversions:
        part.addparam(b'version', cgversion)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        aclpart = bundler.newpart(b'Narrow:responsespec')
        aclpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2530
2530
2531
2531
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    # b2caps may be None when no bundle2 capabilities were supplied.
    if not b2caps or b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    binbookmarks = bookmod.listbinbookmarks(repo)
    payload = bookmod.binaryencode(binbookmarks)
    if payload:
        bundler.newpart(b'bookmarks', data=payload)
2545
2545
2546
2546
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    # One part per requested pushkey namespace.
    for namespace in kwargs.get('listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2558
2558
2559
2559
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Only markers relevant to ancestors of the requested heads are sent.
    subset = [c.node() for c in repo.set(b'::%ln', heads)]
    markers = obsutil.sortedmarkers(repo.obsstore.relevantmarkers(subset))
    bundle2.buildobsmarkerspart(bundler, markers)
2572
2572
2573
2573
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle

    Encodes the repository's phase heads (public/draft) into a
    ``phase-heads`` bundle2 part when the client asked for phases.
    """
    if kwargs.get('phases', False):
        # b2caps may be None, and b2caps.get(b'phases') returns None when the
        # capability key is absent entirely; default to an empty tuple so the
        # membership test raises the intended Abort rather than a TypeError.
        if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # everything is public on a publishing repo
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2621
2621
2622
2622
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Skip when no bundle2 capabilities were supplied at all.
    if not b2caps:
        return
    # Skip unless changesets are exchanged and the client supports the part.
    if not kwargs.get('cg', True) or b'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2649
2649
2650
2650
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Sequential guards, evaluated in the same order as the original
    # combined condition:
    # - changesets must be exchanged,
    # - the client must have advertised support (b2caps may be None),
    # - narrow bundles are not currently compatible with this part.
    if not kwargs.get('cg', True):
        return
    if not b2caps or b'rev-branch-cache' not in b2caps:
        return
    if kwargs.get('narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2686
2687
2687
2688
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.  Raises PushRaced when the repository
    heads no longer match what the peer saw.
    """
    current = repo.heads()
    digest = hashlib.sha1(b''.join(sorted(current))).digest()
    acceptable = (
        their_heads == [b'force']
        or their_heads == current
        or their_heads == [b'hashed', digest]
    )
    if not acceptable:
        # another client committed/pushed/unbundled while the data
        # was being transferred
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2706
2706
2707
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    ret = 0
    # A bundle2 stream needs a transaction; bundle1 handles its own below.
    # [wlock, lock, tr] - stored in a list so nested closures can mutate it.
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith((b'remote:http:', b'remote:https:')):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                ret = bundle2.combinechangegroupresults(op)
        else:
            ret = None
            try:

                def gettransaction():
                    # Lazily create the locks/transaction on first use.
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    ret = op.reply
                    if captureoutput and ret is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            ret.newpart(
                                b'output', data=output, mandatory=False
                            )

            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and ret is not None:
                    parts = exc._bundle2salvagedoutput = ret.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return ret
2794
2795
2795
2796
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Respect the config kill switch.
    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # A partial pull of specific heads is not a clone.
    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        manifesttext = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, manifesttext)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2877
2877
2878
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        words = line.split()
        if not words:
            continue
        attrs = {b'URL': words[0]}
        for keyvalue in words[1:]:
            key, value = keyvalue.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Expand BUNDLESPEC into its components so client-side
            # preferences can match on a single component of the spec.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        entries.append(attrs)

    return entries
2913
2913
2914
def isstreamclonespec(bundlespec):
    """Report whether ``bundlespec`` describes a stream clone (v1 or v2)."""
    # Both stream clone variants are uncompressed on the wire.
    if bundlespec.wirecompression != b'UN':
        return False

    # Stream clone v1
    if bundlespec.wireversion == b's1':
        return True

    # Stream clone v2
    return bool(
        bundlespec.wireversion == b'02'
        and bundlespec.contentopts.get(b'streamv2')
    )
2929
2929
2930
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    filtered = []
    for entry in entries:
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # When a stream clone was requested, drop entries that are
                # not stream clone bundles.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                continue
        # Without a spec we cannot tell whether this entry is a stream
        # clone bundle, so do not attempt it when one was requested.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            continue

        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            continue

        filtered.append(entry)

    return filtered
2983
2984
2984
2985
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        # ``value`` is the manifest entry dict; ``prefers`` is a list of
        # (attribute, preferred value) pairs, highest priority first.
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        """Three-way compare this entry with ``other`` using the preferences.

        Returns a negative number if ``self`` sorts first, positive if
        ``other`` sorts first, and 0 when the preferences can't discriminate.
        """
        for attr, wanted in self.prefers:
            mine = self.value.get(attr)
            theirs = other.value.get(attr)

            # When only one side carries the attribute, an exact match on
            # the preferred value on that side wins outright.
            if theirs is None and mine is not None and mine == wanted:
                return -1
            if mine is None and theirs is not None and theirs == wanted:
                return 1

            # Beyond the special cases above, comparison requires the
            # attribute to be present on both entries.
            if mine is None or theirs is None:
                continue

            # Identical values are a tie; consult the next preference.
            if mine == theirs:
                continue

            # Values differ: an exact match on the preferred value sorts
            # before anything else.
            if mine == wanted:
                return -1
            if theirs == wanted:
                return 1

            # Neither side matched this preference; try the next one.

        # No preference discriminated the entries. Fall back to the
        # original manifest order (stable sort keeps it).
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
3048
3049
3049
3050
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by user preferences.

    Preferences come from the ``ui.clonebundleprefers`` config, a list of
    ``attr=value`` specs in priority order. Without any configured
    preferences the original manifest order is kept (a copy is returned
    either way).
    """
    raw = ui.configlist(b'ui', b'clonebundleprefers')
    if not raw:
        return list(entries)

    # Parse each "attr=value" spec into an (attr, value) pair.
    prefs = []
    for spec in raw:
        prefs.append(spec.split(b'=', 1))

    # Decorate entries with a comparator that understands the preferences,
    # sort, then undecorate.
    decorated = sorted(clonebundleentry(entry, prefs) for entry in entries)
    return [wrapper.value for wrapper in decorated]
3059
3060
3060
3061
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Fetches ``url``, reads it as a bundle, and applies it to ``repo``
    inside a repo lock and a ``bundleurl`` transaction. Returns True on
    success; on an HTTP/URL fetch error, emits a warning and returns
    False so the caller can fall back to another source.
    """
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            # Stream clone bundles apply themselves directly to the repo;
            # everything else goes through the bundle2 machinery with the
            # transaction we opened above.
            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        # HTTPError is a URLError subclass, so it must be caught first to
        # report the richer HTTP-specific message.
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False
General Comments 0
You need to be logged in to leave comments. Login now