##// END OF EJS Templates
exchange: check the `ui.clonebundleprefers` form while processing (issue6257)...
Matt Harbison -
r44763:87780592 default
parent child Browse files
Show More
@@ -1,3100 +1,3108 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from .thirdparty import attr
18 from .thirdparty import attr
19 from . import (
19 from . import (
20 bookmarks as bookmod,
20 bookmarks as bookmod,
21 bundle2,
21 bundle2,
22 changegroup,
22 changegroup,
23 discovery,
23 discovery,
24 error,
24 error,
25 exchangev2,
25 exchangev2,
26 lock as lockmod,
26 lock as lockmod,
27 logexchange,
27 logexchange,
28 narrowspec,
28 narrowspec,
29 obsolete,
29 obsolete,
30 obsutil,
30 obsutil,
31 phases,
31 phases,
32 pushkey,
32 pushkey,
33 pycompat,
33 pycompat,
34 scmutil,
34 scmutil,
35 sslutil,
35 sslutil,
36 streamclone,
36 streamclone,
37 url as urlmod,
37 url as urlmod,
38 util,
38 util,
39 wireprototypes,
39 wireprototypes,
40 )
40 )
41 from .interfaces import repository
41 from .interfaces import repository
42 from .utils import (
42 from .utils import (
43 hashutil,
43 hashutil,
44 stringutil,
44 stringutil,
45 )
45 )
46
46
# Re-export util's urllib compatibility shims so code in this module (and
# historical callers) can reach them as exchange.urlerr / exchange.urlreq.
urlerr = util.urlerr
urlreq = util.urlreq
49
49
# Config section holding the narrow-clone access control lists.
_NARROWACL_SECTION = b'narrowacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    b'v1': b'01',
    b'v2': b'02',
    b'packed1': b's1',
    b'bundle2': b'02',  # legacy
}

# Maps bundle version with content opts to choose which part to bundle
_bundlespeccontentopts = {
    b'v1': {
        b'changegroup': True,
        b'cg.version': b'01',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': False,
        b'revbranchcache': False,
    },
    b'v2': {
        b'changegroup': True,
        b'cg.version': b'02',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': True,
        b'revbranchcache': True,
    },
    b'packed1': {b'cg.version': b's1'},
}
# 'bundle2' is a legacy alias for the v2 content options (shared dict on
# purpose, mirroring the version-name aliasing above).
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

# Content-option overrides applied when a spec requests a variant
# (currently only the stream=v2 clone-bundle variant).
_bundlespecvariants = {
    b"streamv2": {
        b"changegroup": False,
        b"streamv2": True,
        b"tagsfnodescache": False,
        b"revbranchcache": False,
    }
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
93
93
94
94
@attr.s
class bundlespec(object):
    # Parsed form of a bundle specification string, as produced by
    # parsebundlespec(). Field order matters: it defines the positional
    # constructor signature generated by attrs.
    compression = attr.ib()  # bundlespec compression name (e.g. b'BZ')
    wirecompression = attr.ib()  # wire-protocol compression identifier
    version = attr.ib()  # human bundle version (b'v1', b'v2', ...)
    wireversion = attr.ib()  # changegroup version (b'01', b'02', b's1')
    params = attr.ib()  # extra key=value parameters from the spec
    contentopts = attr.ib()  # options selecting which parts to bundle
103
103
104
104
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """

    def parseparams(s):
        # No parameter section: the whole string is the version name.
        if b';' not in s:
            return s, {}

        version, paramstr = s.split(b';', 1)
        params = {}
        for pair in paramstr.split(b';'):
            if b'=' not in pair:
                raise error.InvalidBundleSpecification(
                    _(
                        b'invalid bundle specification: '
                        b'missing "=" in parameter: %s'
                    )
                    % pair
                )
            name, value = pair.split(b'=', 1)
            # Parameters are URI encoded so ";" and "=" can appear in values.
            params[urlreq.unquote(name)] = urlreq.unquote(value)

        return version, params

    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    if b'-' in spec:
        # Full "<compression>-<version>" form.
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if b'generaldelta' in repo.requirements:
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccgversions:
            # Only the version was given; pick a default compression.
            compression = b'none' if spec == b'packed1' else b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(params[b'requirements'].split(b','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if params.get(b"stream") == b"v2":
        contentopts.update(_bundlespecvariants[b"streamv2"])

    # Normalize to the engine's canonical bundlespec / wire names.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )
247
247
248
248
def readbundle(ui, fh, fname, vfs=None):
    """Return an unbundler object for the bundle read from ``fh``.

    ``fname`` is used in error messages only; it defaults to b"stream" when
    empty, and is joined against ``vfs`` when one is given.  Dispatches on
    the 4-byte magic/version header to the cg1, bundle2 or streamclone
    unbundler, aborting on anything unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        # A raw, headerless changegroup stream: splice the consumed bytes
        # back and treat it as an uncompressed HG10 bundle.
        if not header.startswith(b'HG') and header.startswith(b'\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
    if version == b'10':
        if alg is None:
            # HG10 carries a 2-byte compression code after the header.
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == b'S1':
        return streamclone.streamcloneapplier(fh)
    raise error.Abort(
        _(b'%s: unknown bundle version %s') % (fname, version)
    )
278
278
279
279
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Raises ``error.Abort`` when the compression engine or changegroup
    version cannot be mapped back to a known bundlespec.
    """

    def speccompression(alg):
        # Map an internal bundle compression type to its bundlespec name,
        # or None when the engine is not recognized.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            # Internal marker for a BZ stream whose header was consumed.
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            compalg = b.params[b'Compression']
            comp = speccompression(compalg)
            if not comp:
                # Bug fix: report the unrecognized algorithm name. The
                # previous code formatted ``comp`` here, which is None on
                # this path, so the message always read "... : None".
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % compalg
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
346
346
347
347
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        common = [nullid]
    else:
        # Drop any alleged common node the local changelog doesn't know.
        known = changelog.hasnode
        common = [node for node in common if known(node)]
    # No explicit heads means everything reachable from the local heads.
    return discovery.outgoing(repo, common, heads or changelog.heads())
366
366
367
367
def _checkpublish(pushop):
    """Apply the experimental.auto-publish policy before pushing.

    When pushing to a publishing repository would publish draft changesets,
    warn, prompt for confirmation, or abort according to configuration.
    Does nothing for an explicit ``--publish`` push or an unknown policy.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    # Only a publishing remote can implicitly publish our changesets.
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if not published:
        return
    if behavior == b'warn':
        ui.warn(
            _(b'%i changesets about to be published\n') % len(published)
        )
    elif behavior == b'confirm':
        if ui.promptchoice(
            _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
            % len(published)
        ):
            raise error.Abort(_(b'user quit'))
    elif behavior == b'abort':
        msg = _(b'push would publish %i changesets') % len(published)
        hint = _(
            b"use --publish or adjust 'experimental.auto-publish'"
            b" config"
        )
        raise error.Abort(msg, hint=hint)
400
400
401
401
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    # This config lets developers pick the bundle version used during
    # exchange, which is especially handy in tests.  The value is a list of
    # bundle versions to choose from; the highest listed version is used.
    #
    # developer config: devel.legacy.exchange
    versions = op.repo.ui.configlist(b'devel', b'legacy.exchange')
    wantbundle1 = b'bundle1' in versions and b'bundle2' not in versions
    # Fall back to bundle1 whenever the remote can't speak bundle2 anyway.
    return wantbundle1 or not op.remote.capable(b'bundle2')
416
416
417
417
class pushoperation(object):
    """A object that represent a single push operation

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # the local repository we push from
        self.repo = repo
        self.ui = repo.ui
        # the peer repository we push to
        self.remote = remote
        # whether --force was given
        self.force = force
        # revisions to push (None means "everything")
        self.revs = revs
        # bookmarks explicitly requested on the command line
        self.bookmarks = bookmarks
        # whether creating a new remote branch is allowed
        self.newbranch = newbranch
        # names of push steps already performed, so bundle2 processing can
        # skip work a previous step already covered
        self.stepsdone = set()
        # integer outcome of the changegroup push:
        # - None: nothing to push
        # - 0:    HTTP error
        # - 1:    pushed with unchanged remote head count, *or* outgoing
        #         changesets existed but we refused to push them
        # - other values as described by addchangegroup()
        self.cgresult = None
        # boolean outcome of the bookmark push
        self.bkresult = None
        # discovery.outgoing object (carries common and outgoing data)
        self.outgoing = None
        # every remote topological head known before the push
        self.remoteheads = None
        # per-branch detail of the remote state around the push:
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: remote heads known locally (None for a new branch)
        # - newheads: new remote heads (known locally) once outgoing lands
        # - unsyncedheads: remote heads not known locally
        # - discardedheads: remote heads the push makes obsolete
        self.pushbranchmap = None
        # truthy when some nodes are missing locally
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes to push alongside the changesets
        self.outdatedphases = None
        # phase changes to push if the changeset push fails
        self.fallbackoutdatedphases = None
        # obsolescence markers to push
        self.outobsmarkers = set()
        # bookmarks to push, as (name, oldnode | '', newnode | '') tuples
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # whether pushed changesets should be published
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no push target: every common head stays relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # We want cheads = heads(::missingheads and ::commonheads), where
        # missingheads are the pushed revs with secret changesets filtered
        # out.  That expands to:
        #   cheads = ( (missingheads and ::commonheads)
        #            + (commonheads and ::missingheads))
        # While trying to push we already computed:
        #   common = (::commonheads)
        #   missing = ((commonheads::missingheads) - commonheads)
        # so we can pick:
        # * the pushed revs that are already part of common (::commonheads)
        commonrevs = self.outgoing.common
        torev = self.repo.changelog.index.rev
        heads = [node for node in self.revs if torev(node) in commonrevs]
        # * plus the commonheads that are parents of the missing set
        boundary = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        heads.extend(ctx.node() for ctx in boundary)
        return heads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        return self.futureheads if self.cgresult else self.fallbackheads
548
548
549
549
# Mapping of bookmark-push action -> (success message, failure message).
# Each message template takes the bookmark name as its single %s argument.
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}
565
565
566
566
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
    - None means nothing to push
    - 0 means HTTP error
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()

    ``bookmarks`` is an iterable of bookmark names to push, ``publish``
    requests that the pushed changesets be published, and ``opargs`` is an
    optional dict of extra keyword arguments forwarded verbatim to the
    ``pushoperation`` constructor (used by extensions).
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        # For a local-disk peer we can verify up-front that the destination
        # repository supports every requirement of the source.
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    # Any lock we failed to take above is None; nullcontextmanager keeps the
    # nesting uniform either way.
    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not pushop.force:
                    _checksubrepostate(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                # steps already completed through bundle2 are tracked via
                # pushop.stepsdone (see e.g. _pushb2ctx), so the legacy
                # helpers below do not redo them
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
664
664
665
665
# list of steps to perform discovery before push, in execution order
# (populated via the @pushdiscovery decorator, consumed by _pushdiscovery)
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}
673
673
674
674
def pushdiscovery(stepname):
    """decorator registering a discovery step to run before push

    The decorated function is recorded in the step -> function mapping and
    appended to the ordered list of steps, so decoration order matters.

    Only use this decorator for a brand new step; to wrap a step defined by
    an extension, modify the pushdiscovery dictionary directly."""

    def register(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func

    return register
692
692
693
693
def _pushdiscovery(pushop):
    """Run every registered discovery step, in registration order"""
    for name in pushdiscoveryorder:
        pushdiscoverymapping[name](pushop)
699
699
700
700
701 def _checksubrepostate(pushop):
701 def _checksubrepostate(pushop):
702 """Ensure all outgoing referenced subrepo revisions are present locally"""
702 """Ensure all outgoing referenced subrepo revisions are present locally"""
703 for n in pushop.outgoing.missing:
703 for n in pushop.outgoing.missing:
704 ctx = pushop.repo[n]
704 ctx = pushop.repo[n]
705
705
706 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
706 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
707 for subpath in sorted(ctx.substate):
707 for subpath in sorted(ctx.substate):
708 sub = ctx.sub(subpath)
708 sub = ctx.sub(subpath)
709 sub.verify(onpush=True)
709 sub.verify(onpush=True)
710
710
711
711
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    repo = pushop.repo
    remote = pushop.remote
    # First figure out what we have in common with (and incoming from) the
    # remote, scoped to the requested revisions when there are any.
    if pushop.revs:
        commoninc = discovery.findcommonincoming(
            repo, remote, force=pushop.force, ancestorsof=pushop.revs
        )
    else:
        commoninc = discovery.findcommonincoming(repo, remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    # Then derive the outgoing set from that common information.
    pushop.outgoing = discovery.findcommonoutgoing(
        repo,
        remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
737
737
738
738
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)

    Sets ``pushop.outdatedphases`` (phase updates to send if the changeset
    push succeeds) and ``pushop.fallbackoutdatedphases`` (updates to send
    even if it fails); also records ``pushop.remotephases`` in the common
    case."""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset are to be pushed
        # - and remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    # Summarize the remote phase situation as seen from the heads we would
    # fall back to on a failed push.
    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        # On a non-publishing remote, only locally-public changesets can
        # mark remote drafts as outdated.
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that revset break if droots is not strictly
    # XXX root we may want to ensure it is but it is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        # --publish against a non-publishing server: every pushed head that
        # is not already public, or that descends from a remote draft root,
        # needs a phase update.
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        # Nothing to push: the success and failure cases coincide.
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
800
800
801
801
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover which obsolescence markers should accompany the push"""
    repo = pushop.repo
    # Markers are only exchanged when marker exchange is enabled locally,
    # the local obsstore is non-empty, and the remote advertises the
    # 'obsolete' namespace.  Short-circuit evaluation keeps the listkeys
    # round-trip to the remote as the last (and most expensive) check.
    if (
        not obsolete.isenabled(repo, obsolete.exchangeopt)
        or not repo.obsstore
        or b'obsolete' not in listkeys(pushop.remote, b'namespaces')
    ):
        return

    # very naive computation, that can be quite expensive on big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
818
818
819
819
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide which to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    # When specific revisions are pushed, bookmark moves are restricted to
    # their ancestor set.
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    # Expand any shorthand in the explicitly requested bookmark names.
    explicit = set()
    for bookmark in pushop.bookmarks:
        explicit.add(repo._bookmarks.expandname(bookmark))

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
839
839
840
840
841 def _processcompared(pushop, pushed, explicit, remotebms, comp):
841 def _processcompared(pushop, pushed, explicit, remotebms, comp):
842 """take decision on bookmarks to push to the remote repo
842 """take decision on bookmarks to push to the remote repo
843
843
844 Exists to help extensions alter this behavior.
844 Exists to help extensions alter this behavior.
845 """
845 """
846 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
846 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
847
847
848 repo = pushop.repo
848 repo = pushop.repo
849
849
850 for b, scid, dcid in advsrc:
850 for b, scid, dcid in advsrc:
851 if b in explicit:
851 if b in explicit:
852 explicit.remove(b)
852 explicit.remove(b)
853 if not pushed or repo[scid].rev() in pushed:
853 if not pushed or repo[scid].rev() in pushed:
854 pushop.outbookmarks.append((b, dcid, scid))
854 pushop.outbookmarks.append((b, dcid, scid))
855 # search added bookmark
855 # search added bookmark
856 for b, scid, dcid in addsrc:
856 for b, scid, dcid in addsrc:
857 if b in explicit:
857 if b in explicit:
858 explicit.remove(b)
858 explicit.remove(b)
859 pushop.outbookmarks.append((b, b'', scid))
859 pushop.outbookmarks.append((b, b'', scid))
860 # search for overwritten bookmark
860 # search for overwritten bookmark
861 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
861 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
862 if b in explicit:
862 if b in explicit:
863 explicit.remove(b)
863 explicit.remove(b)
864 pushop.outbookmarks.append((b, dcid, scid))
864 pushop.outbookmarks.append((b, dcid, scid))
865 # search for bookmark to delete
865 # search for bookmark to delete
866 for b, scid, dcid in adddst:
866 for b, scid, dcid in adddst:
867 if b in explicit:
867 if b in explicit:
868 explicit.remove(b)
868 explicit.remove(b)
869 # treat as "deleted locally"
869 # treat as "deleted locally"
870 pushop.outbookmarks.append((b, dcid, b''))
870 pushop.outbookmarks.append((b, dcid, b''))
871 # identical bookmarks shouldn't get reported
871 # identical bookmarks shouldn't get reported
872 for b, scid, dcid in same:
872 for b, scid, dcid in same:
873 if b in explicit:
873 if b in explicit:
874 explicit.remove(b)
874 explicit.remove(b)
875
875
876 if explicit:
876 if explicit:
877 explicit = sorted(explicit)
877 explicit = sorted(explicit)
878 # we should probably list all of them
878 # we should probably list all of them
879 pushop.ui.warn(
879 pushop.ui.warn(
880 _(
880 _(
881 b'bookmark %s does not exist on the local '
881 b'bookmark %s does not exist on the local '
882 b'or remote repository!\n'
882 b'or remote repository!\n'
883 )
883 )
884 % explicit[0]
884 % explicit[0]
885 )
885 )
886 pushop.bkresult = 2
886 pushop.bkresult = 2
887
887
888 pushop.outbookmarks.sort()
888 pushop.outbookmarks.sort()
889
889
890
890
def _pushcheckoutgoing(pushop):
    """Validate the outgoing changesets; return True if the push may proceed.

    Returns False when there is nothing to push.  Unless the push is
    forced, aborts if any outgoing head is obsolete or unstable, then runs
    the new-head / push-race checks from the discovery module.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # this message are here for 80 char limit reason
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If we are to push if there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missinghead will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

        discovery.checkheads(pushop)
    return True
927
927
928
928
# List of names of steps to perform for an outgoing bundle2, order matters.
# (populated via the @b2partsgenerator decorator)
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
936
936
937
937
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2-part generating step

    The decorated function is recorded in the step -> function mapping and
    placed in the ordered step list: appended when ``idx`` is None,
    inserted at position ``idx`` otherwise.  Decoration order matters.

    Only use this decorator for new steps; to wrap a step from an
    extension, modify the b2partsgenmapping dictionary directly."""

    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return register
958
958
959
959
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' do not check for push race,
    # * if we don't push anything, there are nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # coarse check: ask the server to verify its full head set still
            # matches what we saw at discovery time
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only the remote heads our push actually affects
            # need to be unchanged on the server side
            affected = set()
            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    # remote heads we intend to discard...
                    affected |= set(discardedheads) & remote
                    # ...plus remote heads that will stop being heads
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)
985
985
986
986
987 def _pushing(pushop):
987 def _pushing(pushop):
988 """return True if we are pushing anything"""
988 """return True if we are pushing anything"""
989 return bool(
989 return bool(
990 pushop.outgoing.missing
990 pushop.outgoing.missing
991 or pushop.outdatedphases
991 or pushop.outdatedphases
992 or pushop.outobsmarkers
992 or pushop.outobsmarkers
993 or pushop.outbookmarks
993 or pushop.outbookmarks
994 )
994 )
995
995
996
996
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert a part checking bookmarks have not moved on the remote"""
    if not _pushing(pushop) or pushop.force:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    if not pushop.outbookmarks or b'bookmarks' not in caps:
        return
    # Record, for every pushed bookmark, the remote-side node we expect.
    expected = [(book, old) for book, old, new in pushop.outbookmarks]
    bundler.newpart(b'check:bookmarks', data=bookmod.binaryencode(expected))
1011
1011
1012
1012
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert a part checking phases have not moved on the remote"""
    if not _pushing(pushop) or pushop.force:
        return
    caps = bundle2.bundle2caps(pushop.remote)
    if pushop.remotephases is None or b'heads' not in caps.get(b'phases', ()):
        return
    # Encode the remote phase state our computations were based on, one
    # bucket per phase, so the server can detect that it changed meanwhile.
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        bundler.newpart(b'check:phases', data=phases.binaryencode(checks))
1030
1030
1031
1031
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    # Negotiate the changegroup version: take the highest version both
    # sides support, defaulting to '01' when the remote advertises none.
    b2caps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(pushop.repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    # Advertise repo requirements the receiver must honor when applying.
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
1078
1078
1079
1079
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # ``devel.legacy.exchange`` can force the old pushkey-based exchange.
    legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    haspushkey = b'pushkey' in b2caps
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())

    # Prefer the compact binary 'phase-heads' part when the remote supports
    # it; otherwise fall back to one pushkey part per outdated head.
    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)
1096
1096
1097
1097
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if pushop.outdatedphases:
        # One bucket per phase; only the public bucket (index 0) is
        # populated: we are advancing outdated remote heads to public.
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart(b'phase-heads', data=phasedata)
1106
1106
1107
1107
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    # map part id -> node so failures/replies can be reported per head
    part2node = []

    def handlefailure(pushop, exc):
        """abort with the node whose pushkey part failed"""
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    # one pushkey part per head to move from draft to public
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """warn about any pushkey part the server ignored or refused"""
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1143
1143
1144
1144
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """handle obsolescence marker push through bundle2"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    # only push markers if both sides share a marker format version
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)
1156
1156
1157
1157
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    # ``devel.legacy.exchange`` can force the old pushkey-based exchange.
    legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
    legacybooks = b'bookmarks' in legacy

    # Prefer the dedicated binary 'bookmarks' part; fall back to one
    # pushkey part per bookmark when only pushkey is available.
    if not legacybooks and b'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif b'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1172
1172
1173
1173
1174 def _bmaction(old, new):
1174 def _bmaction(old, new):
1175 """small utility for bookmark pushing"""
1175 """small utility for bookmark pushing"""
1176 if not old:
1176 if not old:
1177 return b'export'
1177 return b'export'
1178 elif not new:
1178 elif not new:
1179 return b'delete'
1179 return b'delete'
1180 return b'update'
1180 return b'update'
1181
1181
1182
1182
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset

    ``node`` may be None (bookmark deletion), in which case no check is
    performed; ``b`` is the bookmark name used in the error message.
    """
    if node and pushop.repo[node].phase() == phases.secret:
        raise error.Abort(
            _(b'cannot push bookmark %s as it points to a secret changeset') % b
        )
1189
1189
1190
1190
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmarks through a single binary bundle2 'bookmarks' part"""
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        # refuse to expose secret changesets via bookmarks
        _abortonsecretctx(pushop, new, book)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success: a failed part would have aborted the whole bundle,
        # so reaching here means every action was applied.
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1212
1212
1213
1213
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmarks through bundle2, one pushkey part per bookmark"""
    pushop.stepsdone.add(b'bookmarks')
    # map part id -> (bookmark, action) for reply/failure reporting
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        """abort with the bookmark whose pushkey part failed"""
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        # refuse to expose secret changesets via bookmarks
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        """report per-bookmark success or failure from server replies"""
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0][b'return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply
1260
1260
1261
1261
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    # guard clause: nothing to send without --pushvars arguments
    if not pushop.pushvars:
        return

    parsed = {}
    for spec in pushop.pushvars:
        if b'=' not in spec:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % spec)
        name, value = spec.split(b'=', 1)
        parsed[name] = value

    part = bundler.newpart(b'pushvars')
    for name, value in pycompat.iteritems(parsed):
        # advisory params: an old server may simply ignore them
        part.addparam(name, value, mandatory=False)
1282
1282
1283
1283
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback: allow the server to send parts back (needs a transaction)
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # create reply capability
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)
    replyhandlers = []
    # run every registered part generator in its declared order; each may
    # add parts to the bundle and return a handler for the server's reply
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        # dispatch to the failure callback registered for that part, if any
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1341
1341
1342
1342
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo

    Legacy (non-bundle2) path: builds a version '01' changegroup and sends
    it with the ``unbundle`` wire command.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = [b'force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1389
1389
1390
1390
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            # publishing server: everything common becomes public locally
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
1455
1455
1456
1456
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(
                _(
                    b'cannot lock source repo, skipping '
                    b'local %s phase update\n'
                )
                % phasestr
            )
1477
1477
1478
1478
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote

    Legacy (non-bundle2) path: markers are escaped into pushkey payloads.
    """
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug(b'try to push obsolete markers to remote\n')
        rslts = []
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        remotedata = obsolete._pushkeyescape(markers)
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey(b'obsolete', key, b'', data))
        if [r for r in rslts if not r]:
            msg = _(b'failed to push some obsolete markers!\n')
            repo.ui.warn(msg)
1498
1498
1499
1499
def _pushbookmark(pushop):
    """Update bookmark position on remote

    Legacy (non-bundle2) path: one ``pushkey`` wire command per bookmark.
    Skipped when the changegroup push failed (cgresult == 0).
    """
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = b'update'
        if not old:
            action = b'export'
        elif not new:
            action = b'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1533
1533
1534
1534
class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly (expanded to full names up front)
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager (set by the caller, see exchange.pull)
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # names of steps already done (prevents a step from running twice)
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats
        # Number of ancestor changesets to pull from each pulled head.
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled every thing possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is used unless configuration forces the legacy protocol
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # cached bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1623
1623
1624
1624
class transactionmanager(util.transactional):
    """Manage the life cycle of a single exchange transaction.

    The underlying repository transaction is created lazily on first
    request, and the appropriate hooks run when it is closed.
    """

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            # the transaction name embeds source and (password-stripped) URL
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """close transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.close()

    def release(self):
        """release transaction if created"""
        tr = self._tr
        if tr is not None:
            tr.release()
1655
1655
1656
1656
def listkeys(remote, namespace):
    """Fetch the pushkey ``namespace`` mapping from the ``remote`` peer."""
    executor = remote.commandexecutor()
    with executor as e:
        future = e.callcommand(b'listkeys', {b'namespace': namespace})
        return future.result()
1660
1660
1661
1661
def _fullpullbundle2(repo, pullop):
    """Run ``_pullbundle2`` repeatedly until the pull is complete.

    The server may send a partial reply, i.e. when inlining
    pre-computed bundles. In that case, update the common
    set based on the results and pull another bundle.

    There are two indicators that the process is finished:
    - no changeset has been added, or
    - all remote heads are known locally.
    The head check must use the unfiltered view as obsoletion
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            # no changeset was added: the pull is complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # all remote heads are now known locally: the pull is complete
            break
        # fold the freshly-received heads into the common set and retry
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1702
1702
1703
1703
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # refuse to pull from a local peer whose requirements we do not support
    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        # bookmarks live outside the store here, so the working-directory
        # lock is needed to update them safely
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
1806
1806
1807
1807
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """Decorator registering a pre-pull discovery step.

    The decorated function is recorded in the step -> function mapping and
    the step name is appended to the ordered step list, so decoration order
    matters.

    Only use this decorator for a new step; to wrap an existing step from an
    extension, modify the pulldiscovery dictionary directly.
    """

    def register(func):
        # each step name may only be registered once
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return register
1834
1834
1835
1835
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    # steps run in registration order; see the pulldiscovery decorator
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1841
1841
1842
1842
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # bookmark data already known; nothing to fetch
        return
    usesbundle2 = pullop.canusebundle2
    if usesbundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice with
        # new implementation.
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1857
1857
1858
1858
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Current handle changeset discovery only, will change handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it,
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            # every remote head is already known: nothing left to fetch
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1890
1890
1891
1891
def _pullbundle2(pullop):
    """pull data using bundle2

    Requests a single bundle2 from the remote and applies it locally.  As
    visible below, the request can cover the changegroup plus (depending on
    remote capabilities and configuration) phases, bookmarks, obsolescence
    markers, and raw stream-clone data."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check server supports narrow and then adding includepats and excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        # stream clone replaces the changegroup/phases steps entirely
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        # prefer the binary phases part unless legacy exchange is forced
        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                # fall back to pushkey-style phase exchange
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        # request the binary bookmarks part
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always includes bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            # both sides share an obsmarker format: request the markers too
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    # give extensions a chance to amend the request
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

    try:
        op = bundle2.bundleoperation(
            pullop.repo, pullop.gettransaction, source=b'pull'
        )
        op.modes[b'bookmarks'] = b'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
        raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)
2027
2027
2028
2028
2029 def _pullbundle2extraprepare(pullop, kwargs):
2029 def _pullbundle2extraprepare(pullop, kwargs):
2030 """hook function so that extensions can extend the getbundle call"""
2030 """hook function so that extensions can extend the getbundle call"""
2031
2031
2032
2032
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Negotiates the best transfer mechanism the remote supports
    (``getbundle`` > ``changegroup`` > ``changegroupsubset``), fetches the
    changegroup and applies it, recording the combined result in
    ``pullop.cgresult``.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        # Nothing missing locally: report and record a "no changes" result.
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        # Common is only the null revision: this is a full clone/pull.
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        # Legacy peer without getbundle: pull everything reachable from
        # the missing nodes.
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        # Partial pull requested but the legacy remote cannot serve one.
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    # Collapse per-part changegroup return codes into a single result.
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2086
2086
2087
2087
def _pullphase(pullop):
    """Fetch the remote's phase data and apply the resulting movements."""
    if b'phases' not in pullop.stepsdone:
        # Query the remote's `phases` pushkey namespace, then let
        # _pullapplyphases interpret the raw mapping (it also records
        # the step as done).
        remotephases = listkeys(pullop.remote, b'phases')
        _pullapplyphases(pullop, remotephases)
2094
2094
2095
2095
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the raw mapping returned by the remote's `phases`
    pushkey namespace. Depending on whether the remote publishes, the
    pulled subset is advanced to public, or split between public and
    draft boundaries.
    """
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # Hoist frequently-used lookups out of the comprehensions below.
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        # Only open a transaction when there is an actual boundary to move.
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
2130
2130
2131
2131
def _pullbookmarks(pullop):
    """Reconcile local bookmarks with the remote bookmark data on ``pullop``."""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    # The heavy lifting (divergence handling, explicit bookmark checks)
    # lives in the bookmarks module; we just feed it the pull context.
    bookmod.updatefromremote(
        pullop.repo.ui,
        pullop.repo,
        pullop.remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )
2147
2147
2148
2148
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The ``gettransaction`` attribute is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction may have been created
    (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        # The remote publishes markers in chunked pushkey entries named
        # dump0, dump1, ...; dump0 is always present when there is data.
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    # Each chunk is base85-encoded binary marker data.
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            # New markers can change visibility; drop cached sets.
            pullop.repo.invalidatevolatilesets()
    return tr
2176
2176
2177
2177
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.

    Returns a new kwargs dict with ``narrow``/``narrow_acl`` set and the
    requested include/exclude patterns restricted to what the user may see.
    Raises ``error.Abort`` when the user has no includes configured or
    requests patterns outside their allowed set.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    # Per-user config wins; fall back to the section-wide defaults.
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    # Normalize config values to matcher patterns; a bare '*' means
    # "everything", i.e. the repository root.
    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    # Intersect what the client asked for with what the ACL allows.
    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    # Build a fresh dict rather than mutating the caller's kwargs.
    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2232
2232
2233
2233
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: A set of revs the client already has as full (non-ellipsis)
          nodes; these must keep their roots.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # Track each rev's minimum distance from a head.
        revdepth = {h: 0 for h in headsrevs}

    # rev -> set of ellipsis heads reachable from it / roots under it.
    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # Move ``roots`` from ``head`` onto the intermediate ``child``.
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # Find an elided merge between two of the three roots and the head
        # that can absorb those two roots.
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    # Walk newest-to-oldest so heads are classified before their ancestors.
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: this rebinds ``clrev`` (previously cl.rev) to the
        # changelogrevision object for the current rev.
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            # Full node: it roots any ellipsis heads waiting on it, and
            # its parents become required.
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # Required-but-irrelevant node becomes an ellipsis head.
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                # Elided node: propagate its pending heads to its parents.
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2365
2365
2366
2366
def caps20to10(repo, role):
    """Return a bundlecaps set advertising bundle2 support for getbundle."""
    # Encode this repository's bundle2 capabilities for the given role and
    # attach them to the base HG20 marker as a URL-quoted `bundle2=` entry.
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2373
2373
2374
2374
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated by the @getbundle2partsgenerator decorator below.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2382
2382
2383
2383
def getbundle2partsgenerator(stepname, idx=None):
    """Decorator registering a bundle2 part generator for getbundle.

    The decorated function is recorded in the step -> function mapping and
    its step name is appended to the ordered step list (or inserted at
    ``idx`` when given). Beware that decorated functions are registered in
    definition order, which may matter.

    Only use this decorator for new steps; to wrap an existing step from an
    extension, modify the getbundle2partsmapping dictionary directly.
    """

    def register(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func

    return register
2404
2404
2405
2405
def bundle2requested(bundlecaps):
    """Return True when ``bundlecaps`` advertises a bundle2 (HG2x) format.

    A ``None`` capability set means no bundle2 request.
    """
    if bundlecaps is None:
        return False
    # Any capability token beginning with b'HG2' signals bundle2 support.
    for cap in bundlecaps:
        if cap.startswith(b'HG2'):
            return True
    return False
2410
2410
2411
2411
def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # Wire protocol kwargs arrive with str keys; normalize to bytes.
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        # bundle10 can only carry a changegroup; any other argument is
        # unsupported by the legacy format.
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    # Decode the client's advertised bundle2 capabilities from the
    # URL-quoted `bundle2=` bundlecaps entry.
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # Run every registered part generator, in registration order.
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2473
2473
2474
2474
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """Add a stream2 part to the requested bundle (delegates to bundle2).

    Extra positional arguments are accepted for signature compatibility
    with other part generators but are not forwarded.
    """
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2478
2478
2479
2479
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle

    Negotiates the changegroup version from the client's advertised
    capabilities, computes the outgoing set, and appends a ``changegroup``
    part (plus a ``Narrow:responsespec`` part when a narrow ACL pull
    requires it).
    """
    # Nothing to do when the client opted out (cg=False) or advertised no
    # bundle2 capabilities at all.
    if not kwargs.get('cg', True) or not b2caps:
        return

    # Default to the oldest format; upgrade to the highest version both
    # sides support.
    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # Client already has everything requested; emit no part.
        return

    if kwargs.get('narrow', False):
        # Build a matcher restricting the changegroup to the requested
        # narrow patterns (empty patterns filtered out).
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    # Advisory hint so clients can show progress.
    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2545
2545
2546
2546
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    wanted = kwargs.get('bookmarks', False)
    if not wanted:
        return
    supported = b2caps and b'bookmarks' in b2caps
    if not supported:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    # Encode all bookmarks in the binary wire format; skip the part
    # entirely when there is nothing to send.
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart(b'bookmarks', data=payload)
2560
2560
2561
2561
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    # One part per requested pushkey namespace, each carrying the encoded
    # key/value pairs for that namespace.
    for ns in kwargs.get('listkeys', ()):
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', ns)
        part.data = pushkey.encodekeys(repo.listkeys(ns).items())
2573
2573
2574
2574
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Markers relevant to every ancestor of the exchanged heads, in a
    # stable order.
    ancestors = [ctx.node() for ctx in repo.set(b'::%ln', heads)]
    relevant = obsutil.sortedmarkers(repo.obsstore.relevantmarkers(ancestors))
    bundle2.buildobsmarkerspart(bundler, relevant)
2587
2587
2588
2588
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle

    Encodes, per phase, the heads the client needs so it can reproduce the
    server's phase information for the exchanged changesets.
    """
    if kwargs.get('phases', False):
        # Fix: default to () so a client that requests phases without
        # advertising the 'phases' capability gets the intended Abort
        # instead of a TypeError from ``b'heads' in None``.
        if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # A publishing repo exposes everything as public.
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2636
2636
2637
2637
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged AND the client
    # advertises the 'hgtagsfnodes' bundle2 capability.
    wantscg = kwargs.get('cg', True)
    if not (b2caps and wantscg and b'hgtagsfnodes' in b2caps):
        return

    bundle2.addparttagsfnodescache(
        repo, bundler, _computeoutgoing(repo, heads, common)
    )
2664
2664
2665
2665
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Skip unless changesets are being exchanged...
    if not kwargs.get('cg', True):
        return
    # ...the client advertises support...
    if not b2caps or b'rev-branch-cache' not in b2caps:
        return
    # ...and narrow cloning is not in play (not currently compatible).
    if kwargs.get('narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2702
2702
2703
2703
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    current = repo.heads()
    digest = hashutil.sha1(b''.join(sorted(current))).digest()
    # The remote may send the literal heads, a hash of them, or the
    # b'force' sentinel that bypasses the race check entirely.
    unchanged = (
        their_heads == [b'force']
        or their_heads == current
        or their_heads == [b'hashed', digest]
    )
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2721
2721
2722
2722
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    # Lazily take the locks and open the transaction the
                    # first time a bundle part actually needs one.
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        # Buffer any further ui output so it can be shipped
                        # back to the client in an 'output' reply part.
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Tag the exception so callers know it happened during
                # bundle2 processing, and salvage already-generated reply
                # output so the client still sees it.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2810
2810
2811
2811
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible.

    Fetches the remote's clone bundles manifest, filters and sorts the
    entries by client compatibility and preferences, then attempts to
    download and apply the best one before falling back to (or aborting)
    a regular clone.
    """

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # NOTE(review): presumably skipped because a pre-generated clone bundle
    # covers the whole repo and cannot honor an explicit heads request.
    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    entries = sortclonebundleentries(repo.ui, entries)

    # Best candidate first after sorting; only the first is attempted.
    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2892
2892
2893
2893
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            rawkey, rawvalue = rawattr.split(b'=', 1)
            key = urlreq.unquote(rawkey)
            value = urlreq.unquote(rawvalue)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == b'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                except (
                    error.InvalidBundleSpecification,
                    error.UnsupportedBundleSpecification,
                ):
                    # Unparseable specs are kept verbatim; only the derived
                    # attributes are skipped.
                    pass
                else:
                    attrs[b'COMPRESSION'] = bundlespec.compression
                    attrs[b'VERSION'] = bundlespec.version

        entries.append(attrs)

    return entries
2928
2928
2929
2929
def isstreamclonespec(bundlespec):
    """Report whether *bundlespec* describes a stream clone (v1 or v2)."""
    comp = bundlespec.wirecompression
    vers = bundlespec.wireversion

    # Stream clone v1
    if comp == b'UN' and vers == b's1':
        return True

    # Stream clone v2: uncompressed 02 bundle with the streamv2 content opt.
    return bool(
        comp == b'UN' and vers == b'02' and bundlespec.contentopts.get(b'streamv2')
    )
2944
2944
2945
2945
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get(b'BUNDLESPEC')
        if spec:
            try:
                # strict=True: an unparseable or unsupported spec is
                # reported via the exceptions handled below.
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug(
                        b'filtering %s because not a stream clone\n'
                        % entry[b'URL']
                    )
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug(
                    b'filtering %s because unsupported bundle '
                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
                )
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug(
                b'filtering %s because cannot determine if a stream '
                b'clone bundle\n' % entry[b'URL']
            )
            continue

        # Entries served over SNI-requiring TLS are unusable without SNI.
        if b'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug(
                b'filtering %s because SNI not supported\n' % entry[b'URL']
            )
            continue

        newentries.append(entry)

    return newentries
2999
2999
3000
3000
3001 class clonebundleentry(object):
3001 class clonebundleentry(object):
3002 """Represents an item in a clone bundles manifest.
3002 """Represents an item in a clone bundles manifest.
3003
3003
3004 This rich class is needed to support sorting since sorted() in Python 3
3004 This rich class is needed to support sorting since sorted() in Python 3
3005 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3005 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3006 won't work.
3006 won't work.
3007 """
3007 """
3008
3008
3009 def __init__(self, value, prefers):
3009 def __init__(self, value, prefers):
3010 self.value = value
3010 self.value = value
3011 self.prefers = prefers
3011 self.prefers = prefers
3012
3012
3013 def _cmp(self, other):
3013 def _cmp(self, other):
3014 for prefkey, prefvalue in self.prefers:
3014 for prefkey, prefvalue in self.prefers:
3015 avalue = self.value.get(prefkey)
3015 avalue = self.value.get(prefkey)
3016 bvalue = other.value.get(prefkey)
3016 bvalue = other.value.get(prefkey)
3017
3017
3018 # Special case for b missing attribute and a matches exactly.
3018 # Special case for b missing attribute and a matches exactly.
3019 if avalue is not None and bvalue is None and avalue == prefvalue:
3019 if avalue is not None and bvalue is None and avalue == prefvalue:
3020 return -1
3020 return -1
3021
3021
3022 # Special case for a missing attribute and b matches exactly.
3022 # Special case for a missing attribute and b matches exactly.
3023 if bvalue is not None and avalue is None and bvalue == prefvalue:
3023 if bvalue is not None and avalue is None and bvalue == prefvalue:
3024 return 1
3024 return 1
3025
3025
3026 # We can't compare unless attribute present on both.
3026 # We can't compare unless attribute present on both.
3027 if avalue is None or bvalue is None:
3027 if avalue is None or bvalue is None:
3028 continue
3028 continue
3029
3029
3030 # Same values should fall back to next attribute.
3030 # Same values should fall back to next attribute.
3031 if avalue == bvalue:
3031 if avalue == bvalue:
3032 continue
3032 continue
3033
3033
3034 # Exact matches come first.
3034 # Exact matches come first.
3035 if avalue == prefvalue:
3035 if avalue == prefvalue:
3036 return -1
3036 return -1
3037 if bvalue == prefvalue:
3037 if bvalue == prefvalue:
3038 return 1
3038 return 1
3039
3039
3040 # Fall back to next attribute.
3040 # Fall back to next attribute.
3041 continue
3041 continue
3042
3042
3043 # If we got here we couldn't sort by attributes and prefers. Fall
3043 # If we got here we couldn't sort by attributes and prefers. Fall
3044 # back to index order.
3044 # back to index order.
3045 return 0
3045 return 0
3046
3046
3047 def __lt__(self, other):
3047 def __lt__(self, other):
3048 return self._cmp(other) < 0
3048 return self._cmp(other) < 0
3049
3049
3050 def __gt__(self, other):
3050 def __gt__(self, other):
3051 return self._cmp(other) > 0
3051 return self._cmp(other) > 0
3052
3052
3053 def __eq__(self, other):
3053 def __eq__(self, other):
3054 return self._cmp(other) == 0
3054 return self._cmp(other) == 0
3055
3055
3056 def __le__(self, other):
3056 def __le__(self, other):
3057 return self._cmp(other) <= 0
3057 return self._cmp(other) <= 0
3058
3058
3059 def __ge__(self, other):
3059 def __ge__(self, other):
3060 return self._cmp(other) >= 0
3060 return self._cmp(other) >= 0
3061
3061
3062 def __ne__(self, other):
3062 def __ne__(self, other):
3063 return self._cmp(other) != 0
3063 return self._cmp(other) != 0
3064
3064
3065
3065
3066 def sortclonebundleentries(ui, entries):
3066 def sortclonebundleentries(ui, entries):
3067 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3067 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3068 if not prefers:
3068 if not prefers:
3069 return list(entries)
3069 return list(entries)
3070
3070
3071 prefers = [p.split(b'=', 1) for p in prefers]
3071 def _split(p):
3072 if b'=' not in p:
3073 hint = _(b"each comma separated item should be key=value pairs")
3074 raise error.Abort(
3075 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3076 )
3077 return p.split(b'=', 1)
3078
3079 prefers = [_split(p) for p in prefers]
3072
3080
3073 items = sorted(clonebundleentry(v, prefers) for v in entries)
3081 items = sorted(clonebundleentry(v, prefers) for v in entries)
3074 return [i.value for i in items]
3082 return [i.value for i in items]
3075
3083
3076
3084
3077 def trypullbundlefromurl(ui, repo, url):
3085 def trypullbundlefromurl(ui, repo, url):
3078 """Attempt to apply a bundle from a URL."""
3086 """Attempt to apply a bundle from a URL."""
3079 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3087 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3080 try:
3088 try:
3081 fh = urlmod.open(ui, url)
3089 fh = urlmod.open(ui, url)
3082 cg = readbundle(ui, fh, b'stream')
3090 cg = readbundle(ui, fh, b'stream')
3083
3091
3084 if isinstance(cg, streamclone.streamcloneapplier):
3092 if isinstance(cg, streamclone.streamcloneapplier):
3085 cg.apply(repo)
3093 cg.apply(repo)
3086 else:
3094 else:
3087 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3095 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3088 return True
3096 return True
3089 except urlerr.httperror as e:
3097 except urlerr.httperror as e:
3090 ui.warn(
3098 ui.warn(
3091 _(b'HTTP error fetching bundle: %s\n')
3099 _(b'HTTP error fetching bundle: %s\n')
3092 % stringutil.forcebytestr(e)
3100 % stringutil.forcebytestr(e)
3093 )
3101 )
3094 except urlerr.urlerror as e:
3102 except urlerr.urlerror as e:
3095 ui.warn(
3103 ui.warn(
3096 _(b'error fetching bundle: %s\n')
3104 _(b'error fetching bundle: %s\n')
3097 % stringutil.forcebytestr(e.reason)
3105 % stringutil.forcebytestr(e.reason)
3098 )
3106 )
3099
3107
3100 return False
3108 return False
@@ -1,556 +1,569 b''
1 #require no-reposimplestore no-chg
1 #require no-reposimplestore no-chg
2
2
3 Set up a server
3 Set up a server
4
4
5 $ hg init server
5 $ hg init server
6 $ cd server
6 $ cd server
7 $ cat >> .hg/hgrc << EOF
7 $ cat >> .hg/hgrc << EOF
8 > [extensions]
8 > [extensions]
9 > clonebundles =
9 > clonebundles =
10 > EOF
10 > EOF
11
11
12 $ touch foo
12 $ touch foo
13 $ hg -q commit -A -m 'add foo'
13 $ hg -q commit -A -m 'add foo'
14 $ touch bar
14 $ touch bar
15 $ hg -q commit -A -m 'add bar'
15 $ hg -q commit -A -m 'add bar'
16
16
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 $ cat hg.pid >> $DAEMON_PIDS
18 $ cat hg.pid >> $DAEMON_PIDS
19 $ cd ..
19 $ cd ..
20
20
21 Missing manifest should not result in server lookup
21 Missing manifest should not result in server lookup
22
22
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 requesting all changes
24 requesting all changes
25 adding changesets
25 adding changesets
26 adding manifests
26 adding manifests
27 adding file changes
27 adding file changes
28 added 2 changesets with 2 changes to 2 files
28 added 2 changesets with 2 changes to 2 files
29 new changesets 53245c60e682:aaff8d2ffbbf
29 new changesets 53245c60e682:aaff8d2ffbbf
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31
31
32 $ cat server/access.log
32 $ cat server/access.log
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36
36
37 Empty manifest file results in retrieval
37 Empty manifest file results in retrieval
38 (the extension only checks if the manifest file exists)
38 (the extension only checks if the manifest file exists)
39
39
40 $ touch server/.hg/clonebundles.manifest
40 $ touch server/.hg/clonebundles.manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 no clone bundles available on remote; falling back to regular clone
42 no clone bundles available on remote; falling back to regular clone
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48 new changesets 53245c60e682:aaff8d2ffbbf
48 new changesets 53245c60e682:aaff8d2ffbbf
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50
50
51 Manifest file with invalid URL aborts
51 Manifest file with invalid URL aborts
52
52
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 $ hg clone http://localhost:$HGPORT 404-url
54 $ hg clone http://localhost:$HGPORT 404-url
55 applying clone bundle from http://does.not.exist/bundle.hg
55 applying clone bundle from http://does.not.exist/bundle.hg
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution)) (re) (no-windows !)
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution)) (re) (no-windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 abort: error applying bundle
58 abort: error applying bundle
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 [255]
60 [255]
61
61
62 Server is not running aborts
62 Server is not running aborts
63
63
64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
65 $ hg clone http://localhost:$HGPORT server-not-runner
65 $ hg clone http://localhost:$HGPORT server-not-runner
66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
68 abort: error applying bundle
68 abort: error applying bundle
69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
70 [255]
70 [255]
71
71
72 Server returns 404
72 Server returns 404
73
73
74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
75 $ cat http.pid >> $DAEMON_PIDS
75 $ cat http.pid >> $DAEMON_PIDS
76 $ hg clone http://localhost:$HGPORT running-404
76 $ hg clone http://localhost:$HGPORT running-404
77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
78 HTTP error fetching bundle: HTTP Error 404: File not found
78 HTTP error fetching bundle: HTTP Error 404: File not found
79 abort: error applying bundle
79 abort: error applying bundle
80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
81 [255]
81 [255]
82
82
83 We can override failure to fall back to regular clone
83 We can override failure to fall back to regular clone
84
84
85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
87 HTTP error fetching bundle: HTTP Error 404: File not found
87 HTTP error fetching bundle: HTTP Error 404: File not found
88 falling back to normal clone
88 falling back to normal clone
89 requesting all changes
89 requesting all changes
90 adding changesets
90 adding changesets
91 adding manifests
91 adding manifests
92 adding file changes
92 adding file changes
93 added 2 changesets with 2 changes to 2 files
93 added 2 changesets with 2 changes to 2 files
94 new changesets 53245c60e682:aaff8d2ffbbf
94 new changesets 53245c60e682:aaff8d2ffbbf
95
95
96 Bundle with partial content works
96 Bundle with partial content works
97
97
98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
99 1 changesets found
99 1 changesets found
100
100
101 We verify exact bundle content as an extra check against accidental future
101 We verify exact bundle content as an extra check against accidental future
102 changes. If this output changes, we could break old clients.
102 changes. If this output changes, we could break old clients.
103
103
104 $ f --size --hexdump partial.hg
104 $ f --size --hexdump partial.hg
105 partial.hg: size=207
105 partial.hg: size=207
106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
119
119
120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
121 $ hg clone -U http://localhost:$HGPORT partial-bundle
121 $ hg clone -U http://localhost:$HGPORT partial-bundle
122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
123 adding changesets
123 adding changesets
124 adding manifests
124 adding manifests
125 adding file changes
125 adding file changes
126 added 1 changesets with 1 changes to 1 files
126 added 1 changesets with 1 changes to 1 files
127 finished applying clone bundle
127 finished applying clone bundle
128 searching for changes
128 searching for changes
129 adding changesets
129 adding changesets
130 adding manifests
130 adding manifests
131 adding file changes
131 adding file changes
132 added 1 changesets with 1 changes to 1 files
132 added 1 changesets with 1 changes to 1 files
133 new changesets aaff8d2ffbbf
133 new changesets aaff8d2ffbbf
134 1 local changesets published
134 1 local changesets published
135
135
136 Incremental pull doesn't fetch bundle
136 Incremental pull doesn't fetch bundle
137
137
138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
139 adding changesets
139 adding changesets
140 adding manifests
140 adding manifests
141 adding file changes
141 adding file changes
142 added 1 changesets with 1 changes to 1 files
142 added 1 changesets with 1 changes to 1 files
143 new changesets 53245c60e682
143 new changesets 53245c60e682
144
144
145 $ cd partial-clone
145 $ cd partial-clone
146 $ hg pull
146 $ hg pull
147 pulling from http://localhost:$HGPORT/
147 pulling from http://localhost:$HGPORT/
148 searching for changes
148 searching for changes
149 adding changesets
149 adding changesets
150 adding manifests
150 adding manifests
151 adding file changes
151 adding file changes
152 added 1 changesets with 1 changes to 1 files
152 added 1 changesets with 1 changes to 1 files
153 new changesets aaff8d2ffbbf
153 new changesets aaff8d2ffbbf
154 (run 'hg update' to get a working copy)
154 (run 'hg update' to get a working copy)
155 $ cd ..
155 $ cd ..
156
156
157 Bundle with full content works
157 Bundle with full content works
158
158
159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
160 2 changesets found
160 2 changesets found
161
161
162 Again, we perform an extra check against bundle content changes. If this content
162 Again, we perform an extra check against bundle content changes. If this content
163 changes, clone bundles produced by new Mercurial versions may not be readable
163 changes, clone bundles produced by new Mercurial versions may not be readable
164 by old clients.
164 by old clients.
165
165
166 $ f --size --hexdump full.hg
166 $ f --size --hexdump full.hg
167 full.hg: size=442
167 full.hg: size=442
168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
196
196
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
198 $ hg clone -U http://localhost:$HGPORT full-bundle
198 $ hg clone -U http://localhost:$HGPORT full-bundle
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
200 adding changesets
200 adding changesets
201 adding manifests
201 adding manifests
202 adding file changes
202 adding file changes
203 added 2 changesets with 2 changes to 2 files
203 added 2 changesets with 2 changes to 2 files
204 finished applying clone bundle
204 finished applying clone bundle
205 searching for changes
205 searching for changes
206 no changes found
206 no changes found
207 2 local changesets published
207 2 local changesets published
208
208
209 Feature works over SSH
209 Feature works over SSH
210
210
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 adding changesets
213 adding changesets
214 adding manifests
214 adding manifests
215 adding file changes
215 adding file changes
216 added 2 changesets with 2 changes to 2 files
216 added 2 changesets with 2 changes to 2 files
217 finished applying clone bundle
217 finished applying clone bundle
218 searching for changes
218 searching for changes
219 no changes found
219 no changes found
220 2 local changesets published
220 2 local changesets published
221
221
222 Entry with unknown BUNDLESPEC is filtered and not used
222 Entry with unknown BUNDLESPEC is filtered and not used
223
223
224 $ cat > server/.hg/clonebundles.manifest << EOF
224 $ cat > server/.hg/clonebundles.manifest << EOF
225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
226 > http://bad.entry2 BUNDLESPEC=xz-v1
226 > http://bad.entry2 BUNDLESPEC=xz-v1
227 > http://bad.entry3 BUNDLESPEC=none-v100
227 > http://bad.entry3 BUNDLESPEC=none-v100
228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
229 > EOF
229 > EOF
230
230
231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
232 applying clone bundle from http://localhost:$HGPORT1/full.hg
232 applying clone bundle from http://localhost:$HGPORT1/full.hg
233 adding changesets
233 adding changesets
234 adding manifests
234 adding manifests
235 adding file changes
235 adding file changes
236 added 2 changesets with 2 changes to 2 files
236 added 2 changesets with 2 changes to 2 files
237 finished applying clone bundle
237 finished applying clone bundle
238 searching for changes
238 searching for changes
239 no changes found
239 no changes found
240 2 local changesets published
240 2 local changesets published
241
241
242 Automatic fallback when all entries are filtered
242 Automatic fallback when all entries are filtered
243
243
244 $ cat > server/.hg/clonebundles.manifest << EOF
244 $ cat > server/.hg/clonebundles.manifest << EOF
245 > http://bad.entry BUNDLESPEC=UNKNOWN
245 > http://bad.entry BUNDLESPEC=UNKNOWN
246 > EOF
246 > EOF
247
247
248 $ hg clone -U http://localhost:$HGPORT filter-all
248 $ hg clone -U http://localhost:$HGPORT filter-all
249 no compatible clone bundles available on server; falling back to regular clone
249 no compatible clone bundles available on server; falling back to regular clone
250 (you may want to report this to the server operator)
250 (you may want to report this to the server operator)
251 requesting all changes
251 requesting all changes
252 adding changesets
252 adding changesets
253 adding manifests
253 adding manifests
254 adding file changes
254 adding file changes
255 added 2 changesets with 2 changes to 2 files
255 added 2 changesets with 2 changes to 2 files
256 new changesets 53245c60e682:aaff8d2ffbbf
256 new changesets 53245c60e682:aaff8d2ffbbf
257
257
258 URLs requiring SNI are filtered in Python <2.7.9
258 URLs requiring SNI are filtered in Python <2.7.9
259
259
260 $ cp full.hg sni.hg
260 $ cp full.hg sni.hg
261 $ cat > server/.hg/clonebundles.manifest << EOF
261 $ cat > server/.hg/clonebundles.manifest << EOF
262 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
262 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
263 > http://localhost:$HGPORT1/full.hg
263 > http://localhost:$HGPORT1/full.hg
264 > EOF
264 > EOF
265
265
266 #if sslcontext
266 #if sslcontext
267 Python 2.7.9+ support SNI
267 Python 2.7.9+ support SNI
268
268
269 $ hg clone -U http://localhost:$HGPORT sni-supported
269 $ hg clone -U http://localhost:$HGPORT sni-supported
270 applying clone bundle from http://localhost:$HGPORT1/sni.hg
270 applying clone bundle from http://localhost:$HGPORT1/sni.hg
271 adding changesets
271 adding changesets
272 adding manifests
272 adding manifests
273 adding file changes
273 adding file changes
274 added 2 changesets with 2 changes to 2 files
274 added 2 changesets with 2 changes to 2 files
275 finished applying clone bundle
275 finished applying clone bundle
276 searching for changes
276 searching for changes
277 no changes found
277 no changes found
278 2 local changesets published
278 2 local changesets published
279 #else
279 #else
280 Python <2.7.9 will filter SNI URLs
280 Python <2.7.9 will filter SNI URLs
281
281
282 $ hg clone -U http://localhost:$HGPORT sni-unsupported
282 $ hg clone -U http://localhost:$HGPORT sni-unsupported
283 applying clone bundle from http://localhost:$HGPORT1/full.hg
283 applying clone bundle from http://localhost:$HGPORT1/full.hg
284 adding changesets
284 adding changesets
285 adding manifests
285 adding manifests
286 adding file changes
286 adding file changes
287 added 2 changesets with 2 changes to 2 files
287 added 2 changesets with 2 changes to 2 files
288 finished applying clone bundle
288 finished applying clone bundle
289 searching for changes
289 searching for changes
290 no changes found
290 no changes found
291 2 local changesets published
291 2 local changesets published
292 #endif
292 #endif
293
293
294 Stream clone bundles are supported
294 Stream clone bundles are supported
295
295
296 $ hg -R server debugcreatestreamclonebundle packed.hg
296 $ hg -R server debugcreatestreamclonebundle packed.hg
297 writing 613 bytes for 4 files
297 writing 613 bytes for 4 files
298 bundle requirements: generaldelta, revlogv1, sparserevlog
298 bundle requirements: generaldelta, revlogv1, sparserevlog
299
299
300 No bundle spec should work
300 No bundle spec should work
301
301
302 $ cat > server/.hg/clonebundles.manifest << EOF
302 $ cat > server/.hg/clonebundles.manifest << EOF
303 > http://localhost:$HGPORT1/packed.hg
303 > http://localhost:$HGPORT1/packed.hg
304 > EOF
304 > EOF
305
305
306 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
306 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
307 applying clone bundle from http://localhost:$HGPORT1/packed.hg
307 applying clone bundle from http://localhost:$HGPORT1/packed.hg
308 4 files to transfer, 613 bytes of data
308 4 files to transfer, 613 bytes of data
309 transferred 613 bytes in *.* seconds (*) (glob)
309 transferred 613 bytes in *.* seconds (*) (glob)
310 finished applying clone bundle
310 finished applying clone bundle
311 searching for changes
311 searching for changes
312 no changes found
312 no changes found
313
313
314 Bundle spec without parameters should work
314 Bundle spec without parameters should work
315
315
316 $ cat > server/.hg/clonebundles.manifest << EOF
316 $ cat > server/.hg/clonebundles.manifest << EOF
317 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
317 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
318 > EOF
318 > EOF
319
319
320 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
320 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
321 applying clone bundle from http://localhost:$HGPORT1/packed.hg
321 applying clone bundle from http://localhost:$HGPORT1/packed.hg
322 4 files to transfer, 613 bytes of data
322 4 files to transfer, 613 bytes of data
323 transferred 613 bytes in *.* seconds (*) (glob)
323 transferred 613 bytes in *.* seconds (*) (glob)
324 finished applying clone bundle
324 finished applying clone bundle
325 searching for changes
325 searching for changes
326 no changes found
326 no changes found
327
327
328 Bundle spec with format requirements should work
328 Bundle spec with format requirements should work
329
329
330 $ cat > server/.hg/clonebundles.manifest << EOF
330 $ cat > server/.hg/clonebundles.manifest << EOF
331 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
331 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
332 > EOF
332 > EOF
333
333
334 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
334 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
335 applying clone bundle from http://localhost:$HGPORT1/packed.hg
335 applying clone bundle from http://localhost:$HGPORT1/packed.hg
336 4 files to transfer, 613 bytes of data
336 4 files to transfer, 613 bytes of data
337 transferred 613 bytes in *.* seconds (*) (glob)
337 transferred 613 bytes in *.* seconds (*) (glob)
338 finished applying clone bundle
338 finished applying clone bundle
339 searching for changes
339 searching for changes
340 no changes found
340 no changes found
341
341
342 Stream bundle spec with unknown requirements should be filtered out
342 Stream bundle spec with unknown requirements should be filtered out
343
343
344 $ cat > server/.hg/clonebundles.manifest << EOF
344 $ cat > server/.hg/clonebundles.manifest << EOF
345 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
345 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
346 > EOF
346 > EOF
347
347
348 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
348 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
349 no compatible clone bundles available on server; falling back to regular clone
349 no compatible clone bundles available on server; falling back to regular clone
350 (you may want to report this to the server operator)
350 (you may want to report this to the server operator)
351 requesting all changes
351 requesting all changes
352 adding changesets
352 adding changesets
353 adding manifests
353 adding manifests
354 adding file changes
354 adding file changes
355 added 2 changesets with 2 changes to 2 files
355 added 2 changesets with 2 changes to 2 files
356 new changesets 53245c60e682:aaff8d2ffbbf
356 new changesets 53245c60e682:aaff8d2ffbbf
357
357
358 Set up manifest for testing preferences
358 Set up manifest for testing preferences
359 (Remember, the TYPE does not have to match reality - the URL is
359 (Remember, the TYPE does not have to match reality - the URL is
360 important)
360 important)
361
361
362 $ cp full.hg gz-a.hg
362 $ cp full.hg gz-a.hg
363 $ cp full.hg gz-b.hg
363 $ cp full.hg gz-b.hg
364 $ cp full.hg bz2-a.hg
364 $ cp full.hg bz2-a.hg
365 $ cp full.hg bz2-b.hg
365 $ cp full.hg bz2-b.hg
366 $ cat > server/.hg/clonebundles.manifest << EOF
366 $ cat > server/.hg/clonebundles.manifest << EOF
367 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
367 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
368 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
368 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
369 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
369 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
370 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
370 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
371 > EOF
371 > EOF
372
372
373 Preferring an undefined attribute will take first entry
373 Preferring an undefined attribute will take first entry
374
374
375 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
375 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
376 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
376 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
377 adding changesets
377 adding changesets
378 adding manifests
378 adding manifests
379 adding file changes
379 adding file changes
380 added 2 changesets with 2 changes to 2 files
380 added 2 changesets with 2 changes to 2 files
381 finished applying clone bundle
381 finished applying clone bundle
382 searching for changes
382 searching for changes
383 no changes found
383 no changes found
384 2 local changesets published
384 2 local changesets published
385
385
386 Preferring bz2 type will download first entry of that type
386 Preferring bz2 type will download first entry of that type
387
387
388 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
388 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
389 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
389 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
390 adding changesets
390 adding changesets
391 adding manifests
391 adding manifests
392 adding file changes
392 adding file changes
393 added 2 changesets with 2 changes to 2 files
393 added 2 changesets with 2 changes to 2 files
394 finished applying clone bundle
394 finished applying clone bundle
395 searching for changes
395 searching for changes
396 no changes found
396 no changes found
397 2 local changesets published
397 2 local changesets published
398
398
399 Preferring multiple values of an option works
399 Preferring multiple values of an option works
400
400
401 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
401 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
402 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
402 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
403 adding changesets
403 adding changesets
404 adding manifests
404 adding manifests
405 adding file changes
405 adding file changes
406 added 2 changesets with 2 changes to 2 files
406 added 2 changesets with 2 changes to 2 files
407 finished applying clone bundle
407 finished applying clone bundle
408 searching for changes
408 searching for changes
409 no changes found
409 no changes found
410 2 local changesets published
410 2 local changesets published
411
411
412 Sorting multiple values should get us back to original first entry
412 Sorting multiple values should get us back to original first entry
413
413
414 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
414 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
415 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
415 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
416 adding changesets
416 adding changesets
417 adding manifests
417 adding manifests
418 adding file changes
418 adding file changes
419 added 2 changesets with 2 changes to 2 files
419 added 2 changesets with 2 changes to 2 files
420 finished applying clone bundle
420 finished applying clone bundle
421 searching for changes
421 searching for changes
422 no changes found
422 no changes found
423 2 local changesets published
423 2 local changesets published
424
424
425 Preferring multiple attributes has correct order
425 Preferring multiple attributes has correct order
426
426
427 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
427 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
428 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
428 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
429 adding changesets
429 adding changesets
430 adding manifests
430 adding manifests
431 adding file changes
431 adding file changes
432 added 2 changesets with 2 changes to 2 files
432 added 2 changesets with 2 changes to 2 files
433 finished applying clone bundle
433 finished applying clone bundle
434 searching for changes
434 searching for changes
435 no changes found
435 no changes found
436 2 local changesets published
436 2 local changesets published
437
437
438 Test where attribute is missing from some entries
438 Test where attribute is missing from some entries
439
439
440 $ cat > server/.hg/clonebundles.manifest << EOF
440 $ cat > server/.hg/clonebundles.manifest << EOF
441 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
441 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
442 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
442 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
443 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
443 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
444 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
444 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
445 > EOF
445 > EOF
446
446
447 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
447 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
448 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
448 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
449 adding changesets
449 adding changesets
450 adding manifests
450 adding manifests
451 adding file changes
451 adding file changes
452 added 2 changesets with 2 changes to 2 files
452 added 2 changesets with 2 changes to 2 files
453 finished applying clone bundle
453 finished applying clone bundle
454 searching for changes
454 searching for changes
455 no changes found
455 no changes found
456 2 local changesets published
456 2 local changesets published
457
457
458 Test a bad attribute list
459
460 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
461 abort: invalid ui.clonebundleprefers item: bad
462 (each comma separated item should be key=value pairs)
463 [255]
464 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
465 > -U http://localhost:$HGPORT bad-input
466 abort: invalid ui.clonebundleprefers item: bad
467 (each comma separated item should be key=value pairs)
468 [255]
469
470
458 Test interaction between clone bundles and --stream
471 Test interaction between clone bundles and --stream
459
472
460 A manifest with just a gzip bundle
473 A manifest with just a gzip bundle
461
474
462 $ cat > server/.hg/clonebundles.manifest << EOF
475 $ cat > server/.hg/clonebundles.manifest << EOF
463 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
476 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
464 > EOF
477 > EOF
465
478
466 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
479 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
467 no compatible clone bundles available on server; falling back to regular clone
480 no compatible clone bundles available on server; falling back to regular clone
468 (you may want to report this to the server operator)
481 (you may want to report this to the server operator)
469 streaming all changes
482 streaming all changes
470 9 files to transfer, 816 bytes of data
483 9 files to transfer, 816 bytes of data
471 transferred 816 bytes in * seconds (*) (glob)
484 transferred 816 bytes in * seconds (*) (glob)
472
485
473 A manifest with a stream clone but no BUNDLESPEC
486 A manifest with a stream clone but no BUNDLESPEC
474
487
475 $ cat > server/.hg/clonebundles.manifest << EOF
488 $ cat > server/.hg/clonebundles.manifest << EOF
476 > http://localhost:$HGPORT1/packed.hg
489 > http://localhost:$HGPORT1/packed.hg
477 > EOF
490 > EOF
478
491
479 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
492 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
480 no compatible clone bundles available on server; falling back to regular clone
493 no compatible clone bundles available on server; falling back to regular clone
481 (you may want to report this to the server operator)
494 (you may want to report this to the server operator)
482 streaming all changes
495 streaming all changes
483 9 files to transfer, 816 bytes of data
496 9 files to transfer, 816 bytes of data
484 transferred 816 bytes in * seconds (*) (glob)
497 transferred 816 bytes in * seconds (*) (glob)
485
498
486 A manifest with a gzip bundle and a stream clone
499 A manifest with a gzip bundle and a stream clone
487
500
488 $ cat > server/.hg/clonebundles.manifest << EOF
501 $ cat > server/.hg/clonebundles.manifest << EOF
489 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
502 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
490 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
503 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
491 > EOF
504 > EOF
492
505
493 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
506 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
494 applying clone bundle from http://localhost:$HGPORT1/packed.hg
507 applying clone bundle from http://localhost:$HGPORT1/packed.hg
495 4 files to transfer, 613 bytes of data
508 4 files to transfer, 613 bytes of data
496 transferred 613 bytes in * seconds (*) (glob)
509 transferred 613 bytes in * seconds (*) (glob)
497 finished applying clone bundle
510 finished applying clone bundle
498 searching for changes
511 searching for changes
499 no changes found
512 no changes found
500
513
501 A manifest with a gzip bundle and stream clone with supported requirements
514 A manifest with a gzip bundle and stream clone with supported requirements
502
515
503 $ cat > server/.hg/clonebundles.manifest << EOF
516 $ cat > server/.hg/clonebundles.manifest << EOF
504 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
517 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
505 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
518 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
506 > EOF
519 > EOF
507
520
508 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
521 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
509 applying clone bundle from http://localhost:$HGPORT1/packed.hg
522 applying clone bundle from http://localhost:$HGPORT1/packed.hg
510 4 files to transfer, 613 bytes of data
523 4 files to transfer, 613 bytes of data
511 transferred 613 bytes in * seconds (*) (glob)
524 transferred 613 bytes in * seconds (*) (glob)
512 finished applying clone bundle
525 finished applying clone bundle
513 searching for changes
526 searching for changes
514 no changes found
527 no changes found
515
528
516 A manifest with a gzip bundle and a stream clone with unsupported requirements
529 A manifest with a gzip bundle and a stream clone with unsupported requirements
517
530
518 $ cat > server/.hg/clonebundles.manifest << EOF
531 $ cat > server/.hg/clonebundles.manifest << EOF
519 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
532 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
520 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
533 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
521 > EOF
534 > EOF
522
535
523 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
536 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
524 no compatible clone bundles available on server; falling back to regular clone
537 no compatible clone bundles available on server; falling back to regular clone
525 (you may want to report this to the server operator)
538 (you may want to report this to the server operator)
526 streaming all changes
539 streaming all changes
527 9 files to transfer, 816 bytes of data
540 9 files to transfer, 816 bytes of data
528 transferred 816 bytes in * seconds (*) (glob)
541 transferred 816 bytes in * seconds (*) (glob)
529
542
530 Test clone bundle retrieved through bundle2
543 Test clone bundle retrieved through bundle2
531
544
532 $ cat << EOF >> $HGRCPATH
545 $ cat << EOF >> $HGRCPATH
533 > [extensions]
546 > [extensions]
534 > largefiles=
547 > largefiles=
535 > EOF
548 > EOF
536 $ killdaemons.py
549 $ killdaemons.py
537 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
550 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
538 $ cat hg.pid >> $DAEMON_PIDS
551 $ cat hg.pid >> $DAEMON_PIDS
539
552
540 $ hg -R server debuglfput gz-a.hg
553 $ hg -R server debuglfput gz-a.hg
541 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
554 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
542
555
543 $ cat > server/.hg/clonebundles.manifest << EOF
556 $ cat > server/.hg/clonebundles.manifest << EOF
544 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
557 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
545 > EOF
558 > EOF
546
559
547 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
560 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
548 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
561 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
549 adding changesets
562 adding changesets
550 adding manifests
563 adding manifests
551 adding file changes
564 adding file changes
552 added 2 changesets with 2 changes to 2 files
565 added 2 changesets with 2 changes to 2 files
553 finished applying clone bundle
566 finished applying clone bundle
554 searching for changes
567 searching for changes
555 no changes found
568 no changes found
556 2 local changesets published
569 2 local changesets published
General Comments 0
You need to be logged in to leave comments. Login now