##// END OF EJS Templates
py3: fix sorting of obsolete markers when building bundle...
Denis Laxalde -
r43438:01e8eefd default
parent child Browse files
Show More
@@ -1,3084 +1,3085
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .thirdparty import attr
19 from .thirdparty import attr
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 phases,
31 phases,
32 pushkey,
32 pushkey,
33 pycompat,
33 pycompat,
34 scmutil,
34 scmutil,
35 sslutil,
35 sslutil,
36 streamclone,
36 streamclone,
37 url as urlmod,
37 url as urlmod,
38 util,
38 util,
39 wireprototypes,
39 wireprototypes,
40 )
40 )
41 from .interfaces import repository
41 from .interfaces import repository
42 from .utils import stringutil
42 from .utils import stringutil
43
43
# Aliases so this module can use util's url error/request helpers directly.
urlerr = util.urlerr
urlreq = util.urlreq

# Config section consulted for narrow-clone ACLs.
_NARROWACL_SECTION = b'narrowacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    b'v1': b'01',
    b'v2': b'02',
    b'packed1': b's1',
    b'bundle2': b'02',  # legacy
}

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    b'v1': {
        b'changegroup': True,
        b'cg.version': b'01',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': False,
        b'revbranchcache': False,
    },
    b'v2': {
        b'changegroup': True,
        b'cg.version': b'02',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': True,
        b'revbranchcache': True,
    },
    b'packed1': {b'cg.version': b's1'},
}
# 'bundle2' is a legacy name: it shares the 'v2' content options.
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

# Content-option overlays applied on top of the per-version defaults when the
# corresponding variant parameter appears in the bundlespec string.
_bundlespecvariants = {
    b"streamv2": {
        b"changegroup": False,
        b"streamv2": True,
        b"tagsfnodescache": False,
        b"revbranchcache": False,
    }
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
90
90
91
91
@attr.s
class bundlespec(object):
    """Parsed representation of a bundle specification string.

    Instances are produced by ``parsebundlespec``.
    """

    # bundle-level compression name (first element of engine.bundletype())
    compression = attr.ib()
    # compression name as used on the wire (second element of bundletype())
    wirecompression = attr.ib()
    # human-readable bundle version, e.g. b'v1', b'v2', b'packed1'
    version = attr.ib()
    # changegroup version sent on the wire, e.g. b'01', b'02', b's1'
    wireversion = attr.ib()
    # dict of URI-decoded key/value parameters from the spec string
    params = attr.ib()
    # dict of options selecting which parts to include when bundling
    contentopts = attr.ib()
100
100
101
101
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """

    def parseparams(s):
        # Split b'<version>;k0=v0;k1=v1' into (version, params-dict),
        # URI-decoding each key and value.
        if b';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(b';', 1)

        for p in paramstr.split(b';'):
            if b'=' not in p:
                raise error.InvalidBundleSpecification(
                    _(
                        b'invalid bundle specification: '
                        b'missing "=" in parameter: %s'
                    )
                    % p
                )

            key, value = p.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    if b'-' in spec:
        # Full '<compression>-<version>[;params]' form.
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if b'generaldelta' in repo.requirements:
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccgversions:
            if spec == b'packed1':
                compression = b'none'
            else:
                compression = b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(params[b'requirements'].split(b','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if b"stream" in params and params[b"stream"] == b"v2":
        variant = _bundlespecvariants[b"streamv2"]
        contentopts.update(variant)

    # Translate human-readable names into their on-the-wire counterparts.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )
244
244
245
245
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler suited to the bundle data readable from ``fh``.

    Dispatches on the 4-byte magic header: cg1unpacker for HG10 bundles,
    a bundle2 unbundler for HG2x, and a streamclone applier for HGS1.
    Raises ``error.Abort`` on unrecognized data.
    """
    header = changegroup.readexactly(fh, 4)

    compalg = None
    if fname:
        if vfs:
            fname = vfs.join(fname)
    else:
        fname = b"stream"
        if header.startswith(b'\0') and not header.startswith(b'HG'):
            # Data without an HG header: fix it up as an uncompressed HG10
            # stream, replaying the bytes already consumed.
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            compalg = b'UN'

    magic = header[:2]
    version = header[2:]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if compalg is None:
            compalg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, compalg)
    if version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == b'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(_(b'%s: unknown bundle version %s') % (fname, version))
275
275
276
276
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as b'gzip-v1', b'none-v2;stream=v2;...'
    or b'none-packed1;...'. Raises ``error.Abort`` when the compression,
    changegroup version or bundle type cannot be recognized.
    """

    def speccompression(alg):
        # Map a wire compression tag to its bundlespec name, or None if the
        # engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            # Internal marker for a BZ stream whose header was consumed.
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            alg = b.params[b'Compression']
            comp = speccompression(alg)
            if not comp:
                # Report the unrecognized algorithm name, not the None that
                # speccompression() returned for it.
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % alg
                )
        else:
            comp = b'none'

        # Derive the version from the first changegroup part, or short-circuit
        # for stream bundles which carry no changegroup.
        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
343
343
344
344
def _computeoutgoing(repo, heads, common):
    """Compute which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        knowncommon = [nullid]
    else:
        # Discard common nodes that this repository does not actually have.
        knowncommon = [n for n in common if changelog.hasnode(n)]
    if not heads:
        heads = changelog.heads()
    return discovery.outgoing(repo, knowncommon, heads)
363
363
364
364
def _checkpublish(pushop):
    """Warn about, confirm, or abort a push that would publish changesets.

    The reaction is selected by the 'experimental.auto-publish' config and
    only applies when pushing without --publish to a publishing remote.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish:
        return
    if behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        # Non-publishing remote: nothing gets published by this push.
        return

    # Gather the draft/secret changesets the push would turn public.
    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if not published:
        return

    count = len(published)
    if behavior == b'warn':
        ui.warn(_(b'%i changesets about to be published\n') % count)
    elif behavior == b'confirm':
        prompt = _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No') % count
        if ui.promptchoice(prompt):
            raise error.Abort(_(b'user quit'))
    elif behavior == b'abort':
        msg = _(b'push would publish %i changesets') % count
        hint = _(
            b"use --publish or adjust 'experimental.auto-publish'"
            b" config"
        )
        raise error.Abort(msg, hint=hint)
397
397
398
398
399 def _forcebundle1(op):
399 def _forcebundle1(op):
400 """return true if a pull/push must use bundle1
400 """return true if a pull/push must use bundle1
401
401
402 This function is used to allow testing of the older bundle version"""
402 This function is used to allow testing of the older bundle version"""
403 ui = op.repo.ui
403 ui = op.repo.ui
404 # The goal is this config is to allow developer to choose the bundle
404 # The goal is this config is to allow developer to choose the bundle
405 # version used during exchanged. This is especially handy during test.
405 # version used during exchanged. This is especially handy during test.
406 # Value is a list of bundle version to be picked from, highest version
406 # Value is a list of bundle version to be picked from, highest version
407 # should be used.
407 # should be used.
408 #
408 #
409 # developer config: devel.legacy.exchange
409 # developer config: devel.legacy.exchange
410 exchange = ui.configlist(b'devel', b'legacy.exchange')
410 exchange = ui.configlist(b'devel', b'legacy.exchange')
411 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
411 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
412 return forcebundle1 or not op.remote.capable(b'bundle2')
412 return forcebundle1 or not op.remote.capable(b'bundle2')
413
413
414
414
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # not target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        # nm maps a node to its revision number; membership in `common` is
        # tested on revision numbers.
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
545
545
546
546
# mapping of message used when pushing bookmark
# action -> (success message, failure message); both take the bookmark name
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}
562
562
563
563
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
659
659
660
660
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
668
668
669
669
def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated function will be added in order (this
    may matter).

    You can only use this decorator for a new step, if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""

    def dec(func):
        # refuse to overwrite an existing step; extensions must patch the
        # mapping directly instead of re-registering
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return dec
687
687
688
688
def _pushdiscovery(pushop):
    """Run all discovery steps registered in ``pushdiscoveryorder``"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)
694
694
695
695
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changeset that need to be pushed

    Fills ``pushop.outgoing``, ``pushop.remoteheads`` and
    ``pushop.incoming`` from common/outgoing discovery with the remote.
    """
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
721
721
722
722
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
784
784
785
785
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover the obsolescence markers relevant to the push

    Bails out early when obsmarker exchange is disabled, the local
    obsstore is empty, or the remote does not expose the 'obsolete'
    pushkey namespace.
    """
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
802
802
803
803
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide what to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    # bookmark names explicitly requested on the command line, with any
    # name abbreviations expanded
    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
823
823
824
824
825 def _processcompared(pushop, pushed, explicit, remotebms, comp):
825 def _processcompared(pushop, pushed, explicit, remotebms, comp):
826 """take decision on bookmarks to push to the remote repo
826 """take decision on bookmarks to push to the remote repo
827
827
828 Exists to help extensions alter this behavior.
828 Exists to help extensions alter this behavior.
829 """
829 """
830 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
830 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
831
831
832 repo = pushop.repo
832 repo = pushop.repo
833
833
834 for b, scid, dcid in advsrc:
834 for b, scid, dcid in advsrc:
835 if b in explicit:
835 if b in explicit:
836 explicit.remove(b)
836 explicit.remove(b)
837 if not pushed or repo[scid].rev() in pushed:
837 if not pushed or repo[scid].rev() in pushed:
838 pushop.outbookmarks.append((b, dcid, scid))
838 pushop.outbookmarks.append((b, dcid, scid))
839 # search added bookmark
839 # search added bookmark
840 for b, scid, dcid in addsrc:
840 for b, scid, dcid in addsrc:
841 if b in explicit:
841 if b in explicit:
842 explicit.remove(b)
842 explicit.remove(b)
843 pushop.outbookmarks.append((b, b'', scid))
843 pushop.outbookmarks.append((b, b'', scid))
844 # search for overwritten bookmark
844 # search for overwritten bookmark
845 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
845 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
846 if b in explicit:
846 if b in explicit:
847 explicit.remove(b)
847 explicit.remove(b)
848 pushop.outbookmarks.append((b, dcid, scid))
848 pushop.outbookmarks.append((b, dcid, scid))
849 # search for bookmark to delete
849 # search for bookmark to delete
850 for b, scid, dcid in adddst:
850 for b, scid, dcid in adddst:
851 if b in explicit:
851 if b in explicit:
852 explicit.remove(b)
852 explicit.remove(b)
853 # treat as "deleted locally"
853 # treat as "deleted locally"
854 pushop.outbookmarks.append((b, dcid, b''))
854 pushop.outbookmarks.append((b, dcid, b''))
855 # identical bookmarks shouldn't get reported
855 # identical bookmarks shouldn't get reported
856 for b, scid, dcid in same:
856 for b, scid, dcid in same:
857 if b in explicit:
857 if b in explicit:
858 explicit.remove(b)
858 explicit.remove(b)
859
859
860 if explicit:
860 if explicit:
861 explicit = sorted(explicit)
861 explicit = sorted(explicit)
862 # we should probably list all of them
862 # we should probably list all of them
863 pushop.ui.warn(
863 pushop.ui.warn(
864 _(
864 _(
865 b'bookmark %s does not exist on the local '
865 b'bookmark %s does not exist on the local '
866 b'or remote repository!\n'
866 b'or remote repository!\n'
867 )
867 )
868 % explicit[0]
868 % explicit[0]
869 )
869 )
870 pushop.bkresult = 2
870 pushop.bkresult = 2
871
871
872 pushop.outbookmarks.sort()
872 pushop.outbookmarks.sort()
873
873
874
874
def _pushcheckoutgoing(pushop):
    """validate the outgoing changesets before push

    Returns False when there is nothing to push; aborts when an obsolete
    or unstable changeset would be pushed without --force; otherwise runs
    head checks and returns True.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
911
911
912
912
# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
920
920
921
921
def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).  When ``idx`` is given the step is inserted at that
    position in the order instead of appended.

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""

    def dec(func):
        # refuse to overwrite an existing step
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec
942
942
943
943
944 def _pushb2ctxcheckheads(pushop, bundler):
944 def _pushb2ctxcheckheads(pushop, bundler):
945 """Generate race condition checking parts
945 """Generate race condition checking parts
946
946
947 Exists as an independent function to aid extensions
947 Exists as an independent function to aid extensions
948 """
948 """
949 # * 'force' do not check for push race,
949 # * 'force' do not check for push race,
950 # * if we don't push anything, there are nothing to check.
950 # * if we don't push anything, there are nothing to check.
951 if not pushop.force and pushop.outgoing.missingheads:
951 if not pushop.force and pushop.outgoing.missingheads:
952 allowunrelated = b'related' in bundler.capabilities.get(
952 allowunrelated = b'related' in bundler.capabilities.get(
953 b'checkheads', ()
953 b'checkheads', ()
954 )
954 )
955 emptyremote = pushop.pushbranchmap is None
955 emptyremote = pushop.pushbranchmap is None
956 if not allowunrelated or emptyremote:
956 if not allowunrelated or emptyremote:
957 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
957 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
958 else:
958 else:
959 affected = set()
959 affected = set()
960 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
960 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
961 remoteheads, newheads, unsyncedheads, discardedheads = heads
961 remoteheads, newheads, unsyncedheads, discardedheads = heads
962 if remoteheads is not None:
962 if remoteheads is not None:
963 remote = set(remoteheads)
963 remote = set(remoteheads)
964 affected |= set(discardedheads) & remote
964 affected |= set(discardedheads) & remote
965 affected |= remote - set(newheads)
965 affected |= remote - set(newheads)
966 if affected:
966 if affected:
967 data = iter(sorted(affected))
967 data = iter(sorted(affected))
968 bundler.newpart(b'check:updated-heads', data=data)
968 bundler.newpart(b'check:updated-heads', data=data)
969
969
970
970
971 def _pushing(pushop):
971 def _pushing(pushop):
972 """return True if we are pushing anything"""
972 """return True if we are pushing anything"""
973 return bool(
973 return bool(
974 pushop.outgoing.missing
974 pushop.outgoing.missing
975 or pushop.outdatedphases
975 or pushop.outdatedphases
976 or pushop.outobsmarkers
976 or pushop.outobsmarkers
977 or pushop.outbookmarks
977 or pushop.outbookmarks
978 )
978 )
979
979
980
980
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking

    Adds a 'check:bookmarks' part recording the expected current remote
    value of every bookmark we are about to move, so the server can detect
    races.  Skipped with --force or when the remote lacks the capability.
    """
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        # only the (name, expected old value) pair matters for the check
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
995
995
996
996
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking

    Adds a 'check:phases' part with the remote phase heads/roots we
    observed, so the server can abort if phases changed concurrently.
    Skipped with --force or when the remote lacks phase-heads support.
    """
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)
1014
1014
1015
1015
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        # prefer the highest version both sides understand
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
1062
1062
1063
1063
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """Add phase synchronisation parts to an outgoing bundle2.

    Prefers the binary ``phase-heads`` part when the remote advertises
    it (unless legacy exchange is forced via devel config); otherwise
    falls back to per-head pushkey parts.
    """
    if b'phases' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    forcelegacy = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    if not forcelegacy and b'heads' in remotecaps.get(b'phases', ()):
        return _pushb2phaseheads(pushop, bundler)
    if b'pushkey' in remotecaps:
        return _pushb2phasespushkey(pushop, bundler)
def _pushb2phaseheads(pushop, bundler):
    """Push phase information as a binary ``phase-heads`` bundle2 part."""
    pushop.stepsdone.add(b'phases')
    outdated = pushop.outdatedphases
    if not outdated:
        return
    # one bucket per phase; every outdated head is to become public
    # (phase index 0), the other buckets stay empty
    perphase = [[] for _unused in phases.allphases]
    perphase[0] = [head.node() for head in outdated]
    bundler.newpart(b'phase-heads', data=phases.binaryencode(perphase))
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    # (part id, node) pairs so server replies can be mapped back to heads
    part2node = []

    def handlefailure(pushop, exc):
        # invoked when the server rejects one of our pushkey parts
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    # emit one pushkey part per head that must be turned public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # warn about heads the server ignored or failed to publish
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """Add an obsolescence-marker part to the bundle when supported."""
    if b'obsmarkers' in pushop.stepsdone:
        return
    # bail out entirely when no marker format is understood by both sides
    supported = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(supported) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        # sorted() gives a deterministic marker ordering for the part
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """Add bookmark update parts to an outgoing bundle2.

    Prefers the dedicated binary ``bookmarks`` part; falls back to
    pushkey parts when the remote lacks it or legacy exchange is
    forced via devel config.
    """
    if b'bookmarks' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    uselegacy = b'bookmarks' in pushop.repo.ui.configlist(
        b'devel', b'legacy.exchange'
    )
    if b'bookmarks' in remotecaps and not uselegacy:
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in remotecaps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1158 def _bmaction(old, new):
1158 def _bmaction(old, new):
1159 """small utility for bookmark pushing"""
1159 """small utility for bookmark pushing"""
1160 if not old:
1160 if not old:
1161 return b'export'
1161 return b'export'
1162 elif not new:
1162 elif not new:
1163 return b'delete'
1163 return b'delete'
1164 return b'update'
1164 return b'update'
1165
1165
1166
1166
def _abortonsecretctx(pushop, node, b):
    """Raise Abort when bookmark ``b`` targets a secret changeset.

    A falsy ``node`` (e.g. a bookmark deletion) is always accepted.
    """
    if not node:
        return
    if pushop.repo[node].phase() == phases.secret:
        msg = _(b'cannot push bookmark %s as it points to a secret changeset')
        raise error.Abort(msg % b)
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a binary bundle2 'bookmarks' part"""
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    # allactions remembers what to report per bookmark once the server
    # has accepted the part; data is what actually goes on the wire
    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark updates through bundle2 pushkey parts (legacy path)"""
    pushop.stepsdone.add(b'bookmarks')
    # (part id, bookmark, action) triples for mapping replies/failures back
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        # invoked when the server rejects one of our pushkey parts
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    # one pushkey part per outgoing bookmark change
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        # report per-bookmark outcome based on the server's pushkey replies
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''Forward ``--pushvars`` KEY=VALUE pairs as a bundle2 part.'''
    if not pushop.pushvars:
        return
    parsed = {}
    for raw in pushop.pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        # split on the first '=' only so values may themselves contain '='
        key, value = raw.split(b'=', 1)
        parsed[key] = value

    part = bundler.newpart(b'pushvars')
    for key, value in pycompat.iteritems(parsed):
        part.addparam(key, value, mandatory=False)
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback (server sending data back) requires an open transaction
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    # let every registered part generator add its parts, collecting the
    # callables they return for processing the server's reply later
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server embedded an abort in its reply bundle
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch pushkey failures to the callback registered by the
        # part generator that produced the failing part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            # publishing remote: everything common is public locally too
            _localphasemove(pushop, cheads)
        else:  # publish = False
            # non-publishing remote: mirror its public heads, the rest
            # of the common set stays (at most) draft
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
def _localphasemove(pushop, nodes, phase=phases.public):
    """Advance ``nodes`` to ``phase`` in the local source repo.

    Without a transaction manager the phases are left untouched and the
    user is merely told which update was skipped.
    """
    if pushop.trmanager:
        # a transaction is available: actually move the phase boundary
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
        return
    # repo is not locked, do not change any phases!
    # Inform the user that phases should have been moved when applicable.
    phasestr = phases.phasenames[phase]
    wouldmove = [n for n in nodes if phase < pushop.repo[n].phase()]
    if wouldmove:
        pushop.ui.status(
            _(b'cannot lock source repo, skipping local %s phase update\n')
            % phasestr
        )
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        # markers are chunked into pushkey-sized payloads keyed dump0..N
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    # skip when the changegroup push failed or bookmarks were already
    # handled through bundle2
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        # one independent pushkey command per bookmark change
        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value form invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
class pulloperation(object):
    """State holder for a single pull operation.

    Carries pull-related state and very common helpers. A fresh instance
    should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None means "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of changesets common to local and remote before the pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # steps already performed
        self.stepsdone = set()
        # whether we attempted a clone from pre-generated bundles
        self.clonebundleattempted = False
        # set of file patterns to include
        self.includepats = includepats
        # set of file patterns to exclude
        self.excludepats = excludepats
        # number of ancestor changesets to pull from each pulled head
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is None:
            # everything possible was pulled: sync on everything common,
            # plus the remote heads we did not already have
            known = set(self.common)
            subset = list(self.common)
            subset.extend(n for n in self.rheads if n not in known)
            return subset
        # a specific subset was pulled: sync on that subset only
        return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1606
1606
1607
1607
class transactionmanager(util.transactional):
    """Manage the life cycle of a transaction.

    The transaction is created lazily, on demand, and the appropriate
    hooks are fired when it is closed."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1638
1638
1639
1639
def listkeys(remote, namespace):
    """Fetch the pushkey mapping for ``namespace`` from ``remote``."""
    with remote.commandexecutor() as executor:
        future = executor.callcommand(
            b'listkeys', {b'namespace': namespace}
        )
        return future.result()
1643
1643
1644
1644
def _fullpullbundle2(repo, pullop):
    """Repeatedly pull bundle2 data until the pull is complete.

    The server may send a partial reply, e.g. when inlining pre-computed
    bundles. In that case, update the common set based on the results and
    pull another bundle.

    Two indicators mark the process as finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view, as obsolescence markers
    can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        ctxs = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in ctxs}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        ctxs = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in ctxs}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # nothing was added; we are done
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally; we are done
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1685
1685
1686
1686
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote
    repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to
    ``pulloperation`` initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file
    copy of revlogs from the server. This only works when the local
    repository is empty. The default value of ``None`` means to respect
    the server configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``,
    we will fetch up to this many of its ancestors and data associated
    with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # Narrow patterns may be passed in explicitly to provide more
    # flexibility for API consumers; otherwise fall back to the repo's own
    # narrow configuration.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # refuse to pull from a local peer whose requirements we do not support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        # bookmarks live in the working directory area: take the wlock too
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        if remote.capable(b'command-changesetdata'):
            # Use the modern wire protocol, if available.
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs
            # to run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1789
1789
1790
1790
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The decorated function is added to the step -> function mapping and
    appended to the list of steps. Beware that decorated functions are
    added in order (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscovery dictionary directly."""

    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return register


def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        # look the step up through the mapping so extensions that wrapped
        # an entry are honored
        pulldiscoverymapping[stepname](pullop)
1824
1824
1825
1825
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """Fetch bookmark data when the bundle1 protocol is in use.

    Without bundle2, bookmarks must be fetched before changeset discovery
    to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data is already known; nothing to fetch
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations: bookmarks will travel in the bundle2
        # reply instead
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1840
1840
1841
1841
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; eventually all discovery
    may happen here."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most "common but locally
        # hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but is not including a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        for node in rheads:
            if node in nm and node not in scommon:
                common.append(node)
        if set(rheads).issubset(set(common)):
            # every remote head turned out to be known: nothing to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1873
1873
1874
1874
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # if the server supports narrow, forward includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    # let extensions tweak the getbundle arguments
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    try:
        op = bundle2.bundleoperation(
            pullop.repo, pullop.gettransaction, source=b'pull'
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
        raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        pullop.remotebookmarks = {
            record[b'bookmark']: record[b"node"]
            for record in op.records[b'bookmarks']
        }
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
2010
2010
2011
2011
2012 def _pullbundle2extraprepare(pullop, kwargs):
2012 def _pullbundle2extraprepare(pullop, kwargs):
2013 """hook function so that extensions can extend the getbundle call"""
2013 """hook function so that extensions can extend the getbundle call"""
2014
2014
2015
2015
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        # nothing to pull: report and record a "no changes" result
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        # common == [nullid] means we share no history with the remote
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # Pick the richest wire command the remote supports, in order of
    # preference: getbundle > changegroup > changegroupsubset.
    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    # apply the received changegroup within the pull transaction and
    # collapse the per-part results into a single changegroup result
    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2069
2069
2070
2070
def _pullphase(pullop):
    """Fetch phase data from the remote and fold it into the local repo."""
    if b'phases' not in pullop.stepsdone:
        # ask the remote for its 'phases' listkeys namespace and apply it
        _pullapplyphases(pullop, listkeys(pullop.remote, b'phases'))
2077
2077
2078
2078
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    # Work on the unfiltered repo and hoist hot lookups to locals; the
    # filters below run once per pulled head.
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # transaction is only opened when there is actual phase movement
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
2113
2113
2114
2114
def _pullbookmarks(pullop):
    """Merge the remote bookmark state (already fetched) into the repo.

    Marks the 'bookmarks' pull step as done so it only runs once per pull.
    """
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    # delegate the actual reconciliation to the bookmarks module; the
    # transaction getter is passed so one is only opened if needed
    bookmod.updatefromremote(
        pullop.repo.ui,
        pullop.repo,
        pullop.remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )
2130
2130
2131
2131
def _pullobsolete(pullop):
    """Pull obsolescence markers from the remote via pushkey.

    ``pullop.gettransaction`` is a callable returning the pull transaction,
    creating one if necessary.  The transaction (or ``None`` if none was
    opened) is returned so the caller knows whether one was created.

    Exists mostly to allow overriding for experimentation purpose.
    """
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if not obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        return tr
    pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
    remoteobs = listkeys(pullop.remote, b'obsolete')
    if b'dump0' in remoteobs:
        tr = pullop.gettransaction()
        markers = []
        # decode every 'dump*' key; iterate keys in reverse-sorted order
        for key in sorted(remoteobs, reverse=True):
            if not key.startswith(b'dump'):
                continue
            data = util.b85decode(remoteobs[key])
            version, newmarks = obsolete._readmarkers(data)
            markers.extend(newmarks)
        if markers:
            pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr
2159
2159
2160
2160
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Returns a new kwargs dict; the input ``kwargs`` is not mutated.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    # per-user include/exclude patterns, falling back to the 'default.*' keys
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"{} configuration for user {} is empty").format(
                _NARROWACL_SECTION, username
            )
        )

    # normalize config patterns: '*' means the whole repo ('path:.'),
    # anything else is treated as a 'path:' pattern
    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    # NOTE: r'' (native str) keys — these kwargs travel through **kwargs
    # expansion, which requires native string keys on Python 3
    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    # intersect what the client asked for with what the ACL allows
    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for {}: {}").format(
                username, invalid_includes
            )
        )

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'narrow_acl'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args
2217
2217
2218
2218
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: Revs the client already knows as full nodes.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) of each rev from the nearest head
        revdepth = {h: 0 for h in headsrevs}

    # ellipsisheads[rev] = set of ellipsis heads reachable from rev
    # ellipsisroots[head] = set of roots assigned to that ellipsis head
    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move 'roots' from 'head' onto the intermediate 'child' node
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # pick a pair of roots and find a merge rev between them and head
        # that can take the pair over, becoming itself a root of 'head'
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: 'clrev' is deliberately rebound here — from the cl.rev
        # function above to the changelogrevision object for this rev
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # this rev is elided but required: it becomes an ellipsis
                # head, and its parents inherit it as a pending head
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # fully elided: propagate the pending heads to the parents
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2350
2350
2351
2351
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise HG20 plus this repo's url-quoted bundle2 capability blob
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2358
2358
2359
2359
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2367
2367
2368
2368
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""

    def register(func):
        # each step name may be registered exactly once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return register
2389
2389
2390
2390
def bundle2requested(bundlecaps):
    """Tell whether the client asked for a bundle2 stream.

    ``bundlecaps`` is an iterable of capability byte strings, or ``None``
    (treated as bundle2 not requested).
    """
    if bundlecaps is None:
        return False
    # any 'HG2*' capability marks a bundle2-aware client
    return any(cap.startswith(b'HG2') for cap in bundlecaps)
2395
2395
2396
2396
def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # normalize **kwargs keys to bytes for internal processing
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            # bundle10 has no way to transmit anything but a changegroup
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            # extra arguments only make sense for bundle2 parts
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    # decode the client's advertised bundle2 capabilities from bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator in order (order matters; see
    # getbundle2partsorder), letting each append parts to the bundler
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2458
2458
2459
2459
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """add a stream2 part to the requested bundle (delegates to bundle2)"""
    # extra positional args are accepted for interface compatibility but
    # are not forwarded
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2463
2463
2464
2464
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    # Client explicitly opted out of changegroup data via the 'cg' argument.
    if not kwargs.get(r'cg', True):
        return

    # Negotiate the changegroup version: keep only versions both sides
    # support and pick the newest one; default to '01' otherwise.
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    # Nothing to send; emit no part at all.
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        # Build a matcher restricting file data to the requested narrow
        # patterns; empty patterns are filtered out before sorting.
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    # Advisory hint so receivers can display progress.
    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    # NOTE: 'include'/'exclude' are only bound when 'narrow' is true; the
    # short-circuiting first condition guarantees they are defined here.
    if (
        kwargs.get(r'narrow', False)
        and kwargs.get(r'narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2530
2530
2531
2531
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """Emit a binary 'bookmarks' part when the client asked for one.

    Aborts if the client does not advertise the bundle2 bookmarks
    capability; emits nothing when the repository has no bookmarks.
    """
    if not kwargs.get(r'bookmarks', False):
        return
    if b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart(b'bookmarks', data=payload)
2545
2545
2546
2546
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """Emit one 'listkeys' part per pushkey namespace the client requested."""
    for namespace in kwargs.get(r'listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        part.data = pushkey.encodekeys(repo.listkeys(namespace).items())
2558
2558
2559
2559
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        # All changesets reachable from the requested heads.
        subset = [c.node() for c in repo.set(b'::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        # last item of marker tuple ('parents') may be None or a tuple
        # Substituting () for None keeps the sort key homogeneous, since
        # Python 3 raises TypeError when comparing None against a tuple.
        markers = sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
        bundle2.buildobsmarkerspart(bundler, markers)
2572
2573
2573
2574
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if not b'heads' in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        # Map each phase to the set of head nodes in that phase.
        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # A publishing server exposes everything as public.
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        # (one sorted node list per phase, indexed by phase number)
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2621
2622
2622
2623
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Skip unless changesets are being exchanged and the client supports
    # the hgtagsfnodes capability.
    if not kwargs.get(r'cg', True):
        return
    if b'hgtagsfnodes' not in b2caps:
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2649
2650
2650
2651
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Sequential guards (same short-circuit order as a compound test):
    # changesets must be exchanged, the client must support the part, and
    # narrow bundles are not currently compatible with this cache.
    if not kwargs.get(r'cg', True):
        return
    if b'rev-branch-cache' not in b2caps:
        return
    if kwargs.get(r'narrow', False):
        return
    if repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2686
2687
2687
2688
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    ``their_heads`` may be the literal ``[b'force']`` (skip the check), the
    exact current head list, or ``[b'hashed', <sha1 of sorted heads>]``.
    Raises ``error.PushRaced`` when none of those match.
    """
    current_heads = repo.heads()
    current_hash = hashlib.sha1(b''.join(sorted(current_heads))).digest()
    acceptable = (
        their_heads == [b'force']
        or their_heads == current_heads
        or their_heads == [b'hashed', current_hash]
    )
    if not acceptable:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2705
2706
2706
2707
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                # Lazily open wlock/lock/transaction on first use so cheap
                # bundles need not take locks at all; results are stashed in
                # the shared 'lockandtr' list for the outer finally to release.
                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer any further ui output so it can be relayed
                        # to the client in an 'output' part of the reply.
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so callers know it happened during
                # bundle2 processing, and salvage any reply output.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        # Release transaction then locks, in reverse acquisition order.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2794
2795
2795
2796
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Best-effort: silently returns when clone bundles are disabled, the local
    repo is non-empty, specific heads were requested, or the remote lacks
    the capability. Raises ``error.Abort`` only when a bundle was attempted,
    failed, and fallback is disabled.
    """

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Try only the best-ranked entry; failure handling is below.
    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2876
2877
2877
2878
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        entry = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            name, value = rawattr.split(b'=', 1)
            name = urlreq.unquote(name)
            value = urlreq.unquote(value)
            entry[name] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if name == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    entry[b'COMPRESSION'] = bundlespec.compression
                    entry[b'VERSION'] = bundlespec.version
                except (
                    error.InvalidBundleSpecification,
                    error.UnsupportedBundleSpecification,
                ):
                    # Malformed/unknown specs are tolerated; the raw value
                    # stays in the entry without the derived keys.
                    pass

        entries.append(entry)

    return entries
2912
2913
2913
2914
def isstreamclonespec(bundlespec):
    """Return True when *bundlespec* describes a stream clone (v1 or v2)."""
    # Both stream clone flavours require uncompressed wire transfer.
    if bundlespec.wirecompression != b'UN':
        return False

    # Stream clone v1
    if bundlespec.wireversion == b's1':
        return True

    # Stream clone v2
    return bool(
        bundlespec.wireversion == b'02'
        and bundlespec.contentopts.get(b'streamv2')
    )
2928
2929
2929
2930
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                # strict=True: entries with malformed/unsupported specs are
                # dropped (handled in the except clauses below).
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            continue

        # SNI-requiring bundles are unusable without client SNI support.
        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            continue

        newentries.append(entry)

    return newentries
2983
2984
2984
2985
class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich comparison class exists because ``sorted()`` in Python 3
    dropped the ``cmp`` argument, and the ordering logic below is too
    involved to express as a ``key=`` function.
    """

    def __init__(self, value, prefers):
        # value: dict of manifest attributes for one bundle entry.
        # prefers: ordered list of (key, value) preference pairs; earlier
        # pairs take precedence when ordering entries.
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        """Return -1, 0, or 1 ordering ``self`` against ``other``.

        Walks the preference pairs in order; the first one that
        discriminates between the two entries decides the result.
        """
        mine = self.value
        theirs = other.value
        for key, wanted in self.prefers:
            a = mine.get(key)
            b = theirs.get(key)

            # Only one side carries the attribute: that side wins iff it
            # matches the preference exactly; otherwise this preference
            # cannot compare the two entries.
            if b is None:
                if a is not None and a == wanted:
                    return -1
                continue
            if a is None:
                if b == wanted:
                    return 1
                continue

            # Both sides carry the attribute with the same value: defer
            # to the next preference pair.
            if a == b:
                continue

            # Both present and different: an exact match sorts first.
            if a == wanted:
                return -1
            if b == wanted:
                return 1

            # Neither matches; this preference is inconclusive.
            continue

        # No preference discriminated the entries; fall back to the
        # manifest (index) order by reporting equality.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0
3048
3049
3049
3050
def sortclonebundleentries(ui, entries):
    """Sort clone bundle manifest entries by user preference.

    Preferences come from the ``ui.clonebundleprefers`` config option as
    ``KEY=VALUE`` strings; earlier entries take precedence. With no
    preferences configured, the manifest order is preserved.
    """
    prefers = ui.configlist(b'ui', b'clonebundleprefers')
    if not prefers:
        return list(entries)

    # Split each "KEY=VALUE" preference into a (key, value) pair.
    prefers = [p.split(b'=', 1) for p in prefers]

    wrapped = [clonebundleentry(e, prefers) for e in entries]
    wrapped.sort()
    return [w.value for w in wrapped]
3059
3060
3060
3061
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL.

    Returns True when a bundle was fetched and applied; returns False
    after emitting a warning when the fetch failed with an HTTP or URL
    error. Other exceptions (e.g. from bundle application) propagate.
    """
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        # The whole fetch-and-apply sequence stays inside the try:
        # applying streams from the network file object, so http/url
        # errors can surface during application as well.
        try:
            fileobj = urlmod.open(ui, url)
            bundle = readbundle(ui, fileobj, b'stream')

            if isinstance(bundle, streamclone.streamcloneapplier):
                # Stream clone bundles are applied directly, bypassing
                # the bundle2 machinery.
                bundle.apply(repo)
            else:
                bundle2.applybundle(repo, bundle, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            msg = _(b'HTTP error fetching bundle: %s\n') % stringutil.forcebytestr(
                e
            )
            ui.warn(msg)
        except urlerr.urlerror as e:
            msg = _(b'error fetching bundle: %s\n') % stringutil.forcebytestr(
                e.reason
            )
            ui.warn(msg)

        return False
General Comments 0
You need to be logged in to leave comments. Login now