index: use `index.has_node` in `exchange._pulldiscoverychangegroup`...
marmoute
r43945:9c1f4e2f default
@@ -1,3090 +1,3090
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)
from .thirdparty import attr
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    exchangev2,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
    wireprototypes,
)
from .interfaces import repository
from .utils import stringutil

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = b'narrowacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    b'v1': b'01',
    b'v2': b'02',
    b'packed1': b's1',
    b'bundle2': b'02',  # legacy
}

# Maps bundle version to content options, choosing which parts to bundle
_bundlespeccontentopts = {
    b'v1': {
        b'changegroup': True,
        b'cg.version': b'01',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': False,
        b'revbranchcache': False,
    },
    b'v2': {
        b'changegroup': True,
        b'cg.version': b'02',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': True,
        b'revbranchcache': True,
    },
    b'packed1': {b'cg.version': b's1'},
}
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

_bundlespecvariants = {
    b"streamv2": {
        b"changegroup": False,
        b"streamv2": True,
        b"tagsfnodescache": False,
        b"revbranchcache": False,
    }
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}


@attr.s
class bundlespec(object):
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()


def _sortedmarkers(markers):
    # the last item of a marker tuple ('parents') may be None or a tuple
    return sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))

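# Illustrative sketch, not part of exchange.py: why _sortedmarkers() needs
# the custom key. On Python 3, None cannot be compared with a tuple, so a
# marker whose 'parents' field is None would make a plain sorted() raise
# TypeError; substituting an empty tuple keeps the ordering total. Toy
# 3-tuples stand in for the real obsolescence markers here:
#
#   >>> markers = [(b'b', 0, None), (b'a', 0, (b'p',)), (b'a', 0, None)]
#   >>> sorted(markers, key=lambda m: m[:-1] + (m[-1] or (),))
#   [(b'a', 0, None), (b'a', 0, (b'p',)), (b'b', 0, None)]
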
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """

    def parseparams(s):
        if b';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(b';', 1)

        for p in paramstr.split(b';'):
            if b'=' not in p:
                raise error.InvalidBundleSpecification(
                    _(
                        b'invalid bundle specification: '
                        b'missing "=" in parameter: %s'
                    )
                    % p
                )

            key, value = p.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    if b'-' in spec:
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if b'generaldelta' in repo.requirements:
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccgversions:
            if spec == b'packed1':
                compression = b'none'
            else:
                compression = b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(params[b'requirements'].split(b','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if b"stream" in params and params[b"stream"] == b"v2":
        variant = _bundlespecvariants[b"streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )

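# Illustrative sketch, not part of exchange.py: what parsebundlespec()
# returns for a typical spec, assuming a repo with default requirements.
# The b'GZ'/b'02' wire names come from the compression engine's
# bundletype() and from _bundlespeccgversions above:
#
#   >>> spec = parsebundlespec(repo, b'gzip-v2;obsolescence=true')
#   >>> spec.compression, spec.wirecompression, spec.version, spec.wireversion
#   (b'gzip', b'GZ', b'v2', b'02')
#   >>> spec.params
#   {b'obsolescence': b'true'}
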
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == b'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(
            _(b'%s: unknown bundle version %s') % (fname, version)
        )

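# Illustrative summary, not part of exchange.py: how the 4-byte header read
# by readbundle() above dispatches to an unbundler (the compression bytes
# for HG10 are read separately, just after the header):
#
#   b'HG10' + b'UN'/b'GZ'/b'BZ'  -> changegroup.cg1unpacker (bundle1)
#   b'HG20'                      -> bundle2.getunbundler (bundle2)
#   b'HGS1'                      -> streamclone.streamcloneapplier (packed1)
#   leading b'\0' on a stream    -> headerless fixup, treated as HG10/UN
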
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """

    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            comp = speccompression(b.params[b'Compression'])
            if not comp:
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % comp
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)

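# Illustrative usage sketch, not part of exchange.py; the file name is
# hypothetical and assumes the bundle was produced with
# `hg bundle --type zstd-v2`:
#
#   >>> fh = open('bundle.hg', 'rb')
#   >>> getbundlespec(ui, fh)
#   b'zstd-v2'
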
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

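# Illustrative note, not part of exchange.py: the defaults in
# _computeoutgoing() above mean "everything is outgoing" when nothing is
# known to be common, since an empty ``common`` falls back to the null
# node:
#
#   >>> out = _computeoutgoing(repo, [], [])  # no heads, no common
#   >>> out.commonheads == [nullid]
#   True
#
# Common nodes the local changelog does not actually have are silently
# dropped before being handed to discovery.outgoing.
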
def _checkpublish(pushop):
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.Abort(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)

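# Illustrative configuration, not part of exchange.py: the hgrc setting
# consulted by _checkpublish() above. Per the code, b'abort' refuses the
# push outright unless --publish is passed:
#
#   [experimental]
#   auto-publish = warn    # or: confirm, abort
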
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist(b'devel', b'legacy.exchange')
    forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
    return forcebundle1 or not op.remote.capable(b'bundle2')

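# Illustrative configuration, not part of exchange.py: forcing the legacy
# bundle1 exchange format for testing, as read by _forcebundle1() above
# (listing b'bundle2' in the same value would re-enable bundle2):
#
#   [devel]
#   legacy.exchange = bundle1
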
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

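# Illustrative worked example, not part of exchange.py: the set algebra in
# pushoperation.fallbackheads above. Assume this hypothetical history,
# where B and D are the common heads and we push only C (revs = [C]):
#
#       A --- B --- C
#        \
#         D
#
# ::missingheads = {A, B, C} and ::commonheads = {A, B, D}, so
# heads(::missingheads and ::commonheads) = heads({A, B}) = {B}: if the
# push of C fails, B is the relevant remote head. D is not an ancestor of
# anything being pushed and therefore does not appear in fallbackheads.
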
# mapping of messages used when pushing bookmarks
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}


def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop

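# Illustrative usage sketch, not part of exchange.py: how a caller such as
# commands.push() drives this function. The hg.peer() call and the URL are
# assumptions about the caller, not part of this module:
#
#   from mercurial import hg
#   other = hg.peer(repo.ui, {}, b'https://example.com/repo')
#   pushop = push(repo, other, revs=[repo[b'tip'].node()])
#   # per the docstring above: None = nothing to push, 0 = HTTP error,
#   # 1 = pushed (or refused), other values per addchangegroup()
#   ok = pushop.cgresult not in (None, 0)
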
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}


def pushdiscovery(stepname):
    """decorator for a function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""

    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return dec

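# Illustrative extension sketch, not part of exchange.py: registering an
# additional discovery step with the decorator above. The step name and
# function are hypothetical:
#
#   @pushdiscovery(b'my-extra-check')
#   def _pushdiscoverymycheck(pushop):
#       pushop.ui.debug(b'running extra push discovery\n')
#
# The step is appended to pushdiscoveryorder and will run after the
# built-in steps on every call to _pushdiscovery() below.
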
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)


@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

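# Illustrative note, not part of exchange.py: the triple unpacked from
# findcommonincoming() above, per its use in this function:
#
#   common, inc, remoteheads = fci(pushop.repo, pushop.remote, force=False)
#   # common      - nodes known to exist on both sides
#   # inc         - truthy if the remote has nodes we are missing locally
#   # remoteheads - the remote's current heads, saved on the pushoperation
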
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and failure cases of the changeset push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in the issue 3781 case!
        # We drop the phase synchronisation that would otherwise be done as
        # a courtesy, to avoid publishing changesets that are possibly still
        # draft locally on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are going to push as draft
        #
        # This should not be necessary for a publishing server, but because
        # of an issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback


@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # a very naive computation that can be quite expensive on big repos;
    # however, evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)


@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)


def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmarks to push to the remote repo

    Exists to help extensions alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, b'', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

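# Illustrative note, not part of exchange.py: the eight categories unpacked
# from comparebookmarks() above, and what _processcompared() does with each:
#
#   addsrc  - only exists locally: pushed only when explicitly named
#   adddst  - only exists remotely: deleted remotely when explicitly named
#   advsrc  - local side is ahead: pushed when its target is among the
#             pushed revisions (or always, when pushing everything)
#   advdst / diverge / differ - the remote moved or the sides disagree:
#             overwritten only when explicitly named
#   invalid / same - nothing to do
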
880 def _pushcheckoutgoing(pushop):
880 def _pushcheckoutgoing(pushop):
881 outgoing = pushop.outgoing
881 outgoing = pushop.outgoing
882 unfi = pushop.repo.unfiltered()
882 unfi = pushop.repo.unfiltered()
883 if not outgoing.missing:
883 if not outgoing.missing:
884 # nothing to push
884 # nothing to push
885 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
885 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
886 return False
886 return False
887 # something to push
887 # something to push
888 if not pushop.force:
888 if not pushop.force:
889 # if repo.obsstore == False --> no obsolete
889 # if repo.obsstore == False --> no obsolete
890 # then, save the iteration
890 # then, save the iteration
891 if unfi.obsstore:
891 if unfi.obsstore:
892 # this message are here for 80 char limit reason
892 # this message are here for 80 char limit reason
893 mso = _(b"push includes obsolete changeset: %s!")
893 mso = _(b"push includes obsolete changeset: %s!")
894 mspd = _(b"push includes phase-divergent changeset: %s!")
894 mspd = _(b"push includes phase-divergent changeset: %s!")
895 mscd = _(b"push includes content-divergent changeset: %s!")
895 mscd = _(b"push includes content-divergent changeset: %s!")
896 mst = {
896 mst = {
897 b"orphan": _(b"push includes orphan changeset: %s!"),
897 b"orphan": _(b"push includes orphan changeset: %s!"),
898 b"phase-divergent": mspd,
898 b"phase-divergent": mspd,
899 b"content-divergent": mscd,
899 b"content-divergent": mscd,
900 }
900 }
901 # If we are to push if there is at least one
901 # If we are to push if there is at least one
902 # obsolete or unstable changeset in missing, at
902 # obsolete or unstable changeset in missing, at
903 # least one of the missinghead will be obsolete or
903 # least one of the missinghead will be obsolete or
904 # unstable. So checking heads only is ok
904 # unstable. So checking heads only is ok
905 for node in outgoing.missingheads:
905 for node in outgoing.missingheads:
906 ctx = unfi[node]
906 ctx = unfi[node]
907 if ctx.obsolete():
907 if ctx.obsolete():
908 raise error.Abort(mso % ctx)
908 raise error.Abort(mso % ctx)
909 elif ctx.isunstable():
909 elif ctx.isunstable():
910 # TODO print more than one instability in the abort
910 # TODO print more than one instability in the abort
911 # message
911 # message
912 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
912 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
913
913
914 discovery.checkheads(pushop)
914 discovery.checkheads(pushop)
915 return True
915 return True
916
916
917
917
918 # List of names of steps to perform for an outgoing bundle2, order matters.
918 # List of names of steps to perform for an outgoing bundle2, order matters.
919 b2partsgenorder = []
919 b2partsgenorder = []
920
920
921 # Mapping between step name and function
921 # Mapping between step name and function
922 #
922 #
923 # This exists to help extensions wrap steps if necessary
923 # This exists to help extensions wrap steps if necessary
924 b2partsgenmapping = {}
924 b2partsgenmapping = {}
925
925
926
926
def b2partsgenerator(stepname, idx=None):
    """decorator for a function generating a bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""

    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec


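# Editor's illustration (not part of upstream exchange.py): this is how an
# extension-style part generator would hook into the machinery above. The
# step name b'example-noop' is hypothetical, and the generator adds no part,
# so registering it is behaviorally inert.
@b2partsgenerator(b'example-noop')
def _pushb2examplenoop(pushop, bundler):
    # skip if a (hypothetical) earlier code path already handled this step
    if b'example-noop' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'example-noop')
    # a real generator would call bundler.newpart(...) here, and could
    # return a callable that processes the server's reply parts

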
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)


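# Editor's note (illustration, not original commentary): ``check:heads``
# makes the server verify that its full set of heads is still exactly what
# the client saw, while ``check:updated-heads`` only pins the heads this
# push actually affects, so unrelated branches may move concurrently
# without aborting the push.

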
def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(
        pushop.outgoing.missing
        or pushop.outdatedphases
        or pushop.outobsmarkers
        or pushop.outbookmarks
    )


@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)


@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)


@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply


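# Editor's note (illustration, not original commentary): the version
# negotiation above picks the highest changegroup version both sides
# support. For example, if the client supports {b'01', b'02', b'03'} and the
# server advertises (b'01', b'02'), the list comprehension keeps the
# intersection and max() then selects b'02'.

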
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)


def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart(b'phase-heads', data=phasedata)


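# Editor's note (illustration, not original commentary): ``updates[0]`` is
# the ``phases.public`` slot (public == 0 in ``phases.allphases``), so the
# ``phase-heads`` part asks the server to turn the outdated remote heads
# public; ``phases.binaryencode`` serialises one node list per phase.

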
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply


@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = _sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)


@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    legacybooks = b'bookmarks' in legacy

    if not legacybooks and b'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif b'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)


def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return b'export'
    elif not new:
        return b'delete'
    return b'update'


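# Editor's illustration (not part of upstream exchange.py), using
# hypothetical node values:
#
#   _bmaction(None, b'ff..')     -> b'export'  (bookmark is new remotely)
#   _bmaction(b'ff..', None)     -> b'delete'  (bookmark removed locally)
#   _bmaction(b'ff..', b'aa..')  -> b'update'  (bookmark moved)

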
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if node and pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )


def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply


def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add(b'bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply


@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if b'=' not in raw:
                msg = (
                    b"unable to parse variable '%s', should follow "
                    b"'KEY=VALUE' or 'KEY=' format"
                )
                raise error.Abort(msg % raw)
            k, v = raw.split(b'=', 1)
            shellvars[k] = v

        part = bundler.newpart(b'pushvars')

        for key, value in pycompat.iteritems(shellvars):
            part.addparam(key, value, mandatory=False)


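# Editor's note (illustration, not original commentary): the KEY=VALUE
# strings parsed above are what a user passes on the command line, e.g.
# ``hg push --pushvars "DEBUG=1" --pushvars "REASON="``; server-side hooks
# then typically see them as HG_USERVAR_* environment variables.

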
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)


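# Editor's note (illustration, not original commentary): the ``replycaps``
# part is added unconditionally above, so a bundle with ``nbparts <= 1``
# carries no actual payload -- that is exactly what the "do not push if
# nothing to push" check tests before sending anything over the wire.

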
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())


def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )


def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )


def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        markers = _sortedmarkers(pushop.outobsmarkers)
        remotedata = obsolete._pushkeyescape(markers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)


def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1


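# Editor's note (illustration, not original commentary): over pushkey, a
# bookmark move is a compare-and-swap: the server applies ``new`` only if
# the bookmark still points at ``old``, returning 1 on success and 0 on
# refusal, which is why a falsy result above triggers the warning branch.

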
class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()


class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()


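# Editor's illustration (not part of upstream exchange.py): typical use of
# transactionmanager as a context manager, mirroring pull() further below.
# util.transactional supplies __enter__/__exit__, calling close() on normal
# exit and release() when an exception escapes.
#
#     trmanager = transactionmanager(repo, b'pull', remote.url())
#     with trmanager:
#         tr = trmanager.transaction()  # opened lazily, on first use
#         # ... apply pulled data under ``tr`` ...
#     # success -> close() commits; failure -> release() aborts

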
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()


def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsolescence
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as a band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


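# Editor's note (illustration, not original commentary): ``%ln``
# interpolates a list of binary nodes into a revset, so headsofdiff(h1, h2)
# evaluates ``heads(h1 % h2)`` -- the heads among revisions reachable from
# h1 but not from h2, i.e. precisely what the latest bundle round added.

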
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to
            # run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop


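# Editor's illustration (not part of upstream exchange.py): a minimal
# embedding-style caller of pull(), assuming a local repository path and a
# remote URL; shown as a comment so the module body stays unchanged.
#
#     from mercurial import exchange, hg, ui as uimod
#
#     ui = uimod.ui.load()
#     repo = hg.repository(ui, b'/path/to/local/repo')
#     remote = hg.peer(ui, {}, b'https://example.com/repo')
#     pullop = exchange.pull(repo, remote)
#     if pullop.cgresult:
#         ui.status(b'changegroup pulled successfully\n')

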
1797 # list of steps to perform discovery before pull
1797 # list of steps to perform discovery before pull
1798 pulldiscoveryorder = []
1798 pulldiscoveryorder = []
1799
1799
1800 # Mapping between step name and function
1800 # Mapping between step name and function
1801 #
1801 #
1802 # This exists to help extensions wrap steps if necessary
1802 # This exists to help extensions wrap steps if necessary
1803 pulldiscoverymapping = {}
1803 pulldiscoverymapping = {}
1804
1804
1805
1805
def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""

    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec


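# A sketch of how an extension could register an extra discovery step with
# the decorator above (the step name and function are hypothetical):
#
#     @pulldiscovery(b'my-extension-step')
#     def _pulldiscoverymystep(pullop):
#         pullop.repo.ui.debug(b'running extra pull discovery\n')
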
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)


@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # to new implementations.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological number of
        # round trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads


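# Note on the membership test above: `index.has_node(node)` is the preferred
# replacement for the older `node in changelog.nodemap` idiom, e.g.
# (illustrative):
#
#     has_node = repo.unfiltered().changelog.index.has_node
#     assert has_node(repo.changelog.node(0))
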
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check whether the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

        try:
            op = bundle2.bundleoperation(
                pullop.repo, pullop.gettransaction, source=b'pull'
            )
            op.modes[b'bookmarks'] = b'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
            raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # process phase changes
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # process bookmark updates
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)


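# For reference, a non-streaming `getbundle` argument dict assembled above
# looks roughly like this (values are illustrative):
#
#     {
#         b'bundlecaps': {b'HG20', b'bundle2=...'},
#         b'common': [nullid],
#         b'heads': [somehead],
#         b'cg': True,
#         b'phases': True,
#         b'listkeys': [b'bookmarks'],
#     }
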
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""


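# An extension wanting extra bundle2 arguments would typically wrap the hook
# above (sketch only; the wrapper and argument names are hypothetical):
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs[b'myextarg'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(
#         exchange, '_pullbundle2extraprepare', _extraprepare
#     )
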
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing and don't break a future useful rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull'}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)


def _pullphase(pullop):
    # Get remote phases data from remote
    if b'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)


def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)


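# For reference, the `remotephases` pushkey namespace decoded above roughly
# maps hex nodes of remote draft roots to b'1', plus an optional publishing
# flag (illustrative):
#
#     {b'publishing': b'True'}       # publishing server
#     {b'cafe...beef': b'1'}         # one draft root on a non-publishing one
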
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(
        repo.ui,
        repo,
        remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )


def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` function returns the pull transaction, creating one
    if necessary. We return the transaction to inform the calling code that a
    new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr


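# For reference, obsolescence markers travel over pushkey as base85-encoded
# binary blobs stored under keys b'dump0', b'dump1', ..., which the loop
# above decodes with util.b85decode() and feeds to obsolete._readmarkers().
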
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"{} configuration for user {} is empty").format(
                _NARROWACL_SECTION, username
            )
        )

    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for {}: {}").format(
                username, invalid_includes
            )
        )

    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args


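# A server-side configuration consumed by applynarrowacl() might look like
# this in the repository hgrc (paths and the user name are illustrative):
#
#     [narrowacl]
#     default.includes = public/
#     alice.includes = public/, teams/alice/
#     alice.excludes = teams/alice/secrets/
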
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: A set of revs that must be emitted as full (non-ellipsis) nodes
        if they are visited.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots


def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {b'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add(b'bundle2=' + urlreq.quote(capsblob))
    return caps


# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}


def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""

    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func

    return dec


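# A sketch of registering an additional getbundle part with the decorator
# above (the part name and generator function are hypothetical):
#
#     @getbundle2partsgenerator(b'my-part')
#     def _getbundlemypart(bundler, repo, source, bundlecaps=None,
#                          b2caps=None, **kwargs):
#         bundler.newpart(b'my-part', data=b'payload')
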
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith(b'HG2') for cap in bundlecaps)
    return False


def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()


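# A usage sketch for getbundlechunks() (illustrative): writing a full bundle2
# stream to a file.
#
#     info, chunks = getbundlechunks(
#         repo, b'bundle', heads=repo.heads(), common=[nullid],
#         bundlecaps={b'HG20'},
#     )
#     with open('all.hg', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)
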
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)


@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get('cg', True):
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get('narrow', False):
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )


@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if b'bookmarks' not in b2caps:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart(b'bookmarks', data=data)


@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)


@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set(b'::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = _sortedmarkers(markers)
        bundle2.buildobsmarkerspart(bundler, markers)


@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if b'heads' not in b2caps.get(b'phases'):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if the client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data into the format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)


2629 @getbundle2partsgenerator(b'hgtagsfnodes')
2629 @getbundle2partsgenerator(b'hgtagsfnodes')
2630 def _getbundletagsfnodes(
2630 def _getbundletagsfnodes(
2631 bundler,
2631 bundler,
2632 repo,
2632 repo,
2633 source,
2633 source,
2634 bundlecaps=None,
2634 bundlecaps=None,
2635 b2caps=None,
2635 b2caps=None,
2636 heads=None,
2636 heads=None,
2637 common=None,
2637 common=None,
2638 **kwargs
2638 **kwargs
2639 ):
2639 ):
2640 """Transfer the .hgtags filenodes mapping.
2640 """Transfer the .hgtags filenodes mapping.
2641
2641
2642 Only values for heads in this bundle will be transferred.
2642 Only values for heads in this bundle will be transferred.
2643
2643
2644 The part data consists of pairs of 20 byte changeset node and .hgtags
2644 The part data consists of pairs of 20 byte changeset node and .hgtags
2645 filenodes raw values.
2645 filenodes raw values.
2646 """
2646 """
2647 # Don't send unless:
2647 # Don't send unless:
2648 # - changeset are being exchanged,
2648 # - changeset are being exchanged,
2649 # - the client supports it.
2649 # - the client supports it.
2650 if not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2650 if not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2651 return
2651 return
2652
2652
2653 outgoing = _computeoutgoing(repo, heads, common)
2653 outgoing = _computeoutgoing(repo, heads, common)
2654 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2654 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
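    # Illustrative note (layout restated from the docstring above; treat
    # the exact framing as an assumption): the part body is a flat
    # sequence of records, each a 20-byte changeset node followed by the
    # raw .hgtags filenode for that head.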


@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of records, one per branch:

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (
        not kwargs.get('cg', True)
        or b'rev-branch-cache' not in b2caps
        or kwargs.get('narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)
    ):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)


def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
    if not (
        their_heads == [b'force']
        or their_heads == heads
        or their_heads == [b'hashed', heads_hash]
    ):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
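    # Illustrative sketch (hypothetical client-side counterpart, not part
    # of this module): the [b'hashed', ...] form accepted above would be
    # built by the peer as
    #
    #   their_heads = [b'hashed',
    #                  hashlib.sha1(b''.join(sorted(remoteheads))).digest()]
    #
    # which lets the server detect a push race without receiving the full
    # head list.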


def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
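# Illustrative note on the lazy-locking pattern above (behavior read from
# the code; the calls shown are hypothetical usage): gettransaction()
# acquires wlock (only when bookmarks live outside the store), then lock,
# then a transaction on first use, caching them in lockandtr, e.g.
#
#   tr = gettransaction()  # first call: takes locks, opens the transaction
#   tr = gettransaction()  # later calls: returns the cached lockandtr[2]
#
# The finally block then releases everything in reverse order via
# lockmod.release().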


def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )


def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL, and the remaining keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
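# Illustrative sketch (hypothetical manifest content, not from the
# original code): a manifest line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#   {b'URL': b'https://example.com/full.hg',
#    b'BUNDLESPEC': b'gzip-v2',
#    b'COMPRESSION': b'gzip',
#    b'VERSION': b'v2',
#    b'REQUIRESNI': b'true'}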


def isstreamclonespec(bundlespec):
    # Stream clone v1
    if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
        return True

    # Stream clone v2
    if (
        bundlespec.wirecompression == b'UN'
        and bundlespec.wireversion == b'02'
        and bundlespec.contentopts.get(b'streamv2')
    ):
        return True

    return False
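# Illustrative note (example spec strings; treat them as assumptions): a
# v1 stream clone spec such as b'none-packed1' parses to wirecompression
# b'UN' / wireversion b's1', while a v2 spec like b'none-v2;stream=v2'
# yields wireversion b'02' with the b'streamv2' content option set.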


def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is, so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            continue

        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            continue

        newentries.append(entry)

    return newentries
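# Illustrative note (hypothetical scenario, using the example specs
# mentioned above): with streamclonerequested=True, an entry whose
# BUNDLESPEC is b'gzip-v2' is filtered out by the isstreamclonespec()
# check, an entry with no BUNDLESPEC at all is dropped because it cannot
# be classified, and a b'none-packed1' entry is kept.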


class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless the attribute is present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to the next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to the next attribute.
            continue

        # If we got here, we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0


def sortclonebundleentries(ui, entries):
    prefers = ui.configlist(b'ui', b'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split(b'=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
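# Illustrative note (hypothetical configuration): with
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries advertising VERSION=v2 sort first; ties are then broken by
# preferring COMPRESSION=gzip, and entries that still compare equal keep
# their manifest order (clonebundleentry._cmp() returns 0).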


def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction(b'bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, b'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(
                _(b'HTTP error fetching bundle: %s\n')
                % stringutil.forcebytestr(e)
            )
        except urlerr.urlerror as e:
            ui.warn(
                _(b'error fetching bundle: %s\n')
                % stringutil.forcebytestr(e.reason)
            )

        return False