##// END OF EJS Templates
clonebundles: optional memory-requirement attribution...
Joerg Sonnenberger -
r45608:9c7ff887 default draft
parent child Browse files
Show More
@@ -1,3140 +1,3157 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .thirdparty import attr
19 from .thirdparty import attr
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 obsutil,
31 obsutil,
32 phases,
32 phases,
33 pushkey,
33 pushkey,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 sslutil,
36 sslutil,
37 streamclone,
37 streamclone,
38 url as urlmod,
38 url as urlmod,
39 util,
39 util,
40 wireprototypes,
40 wireprototypes,
41 )
41 )
42 from .interfaces import repository
42 from .interfaces import repository
43 from .utils import (
43 from .utils import (
44 hashutil,
44 hashutil,
45 stringutil,
45 stringutil,
46 )
46 )
47
47
# urllib error/request compatibility aliases re-exported from util
urlerr = util.urlerr
urlreq = util.urlreq

# config section holding narrow-clone ACL definitions
_NARROWACL_SECTION = b'narrowacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {
    b'v1': b'01',
    b'v2': b'02',
    b'packed1': b's1',
    b'bundle2': b'02',  # legacy
}

# Maps bundle version with content opts to choose which part to bundle
# (consumed via _bundlespeccontentopts.get(version, {}) in parsebundlespec)
_bundlespeccontentopts = {
    b'v1': {
        b'changegroup': True,
        b'cg.version': b'01',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': False,
        b'revbranchcache': False,
    },
    b'v2': {
        b'changegroup': True,
        b'cg.version': b'02',
        b'obsolescence': False,
        b'phases': False,
        b'tagsfnodescache': True,
        b'revbranchcache': True,
    },
    b'packed1': {b'cg.version': b's1'},
}
# 'bundle2' is a legacy alias: same content options as 'v2'
_bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']

# Overrides applied on top of the version's content opts when the spec
# carries a matching variant parameter (e.g. ;stream=v2)
_bundlespecvariants = {
    b"streamv2": {
        b"changegroup": False,
        b"streamv2": True,
        b"tagsfnodescache": False,
        b"revbranchcache": False,
    }
}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
94
94
95
95
@attr.s
class bundlespec(object):
    """Parsed form of a bundle specification string.

    Produced by ``parsebundlespec()``. Holds both the bundlespec-facing
    and wire-protocol names for the compression engine and version (the
    pair returned by ``engine.bundletype()``), plus the URI-decoded
    parameters and the content options derived from the version.
    """

    # bundlespec compression name (first element of engine.bundletype())
    compression = attr.ib()
    # wire-protocol compression identifier (second element of bundletype())
    wirecompression = attr.ib()
    # human-readable bundle version, e.g. b'v1' / b'v2' / b'packed1'
    version = attr.ib()
    # changegroup version on the wire (from _bundlespeccgversions)
    wireversion = attr.ib()
    # dict of URI-decoded key=value parameters from the spec string
    params = attr.ib()
    # options selecting which parts go into the bundle
    contentopts = attr.ib()
104
104
105
105
def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """

    def parseparams(s):
        """Split ``<version>[;k=v[;k=v...]]`` into (version, params dict).

        Keys and values are URI-decoded. Raises InvalidBundleSpecification
        for a parameter without "=".
        """
        if b';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(b';', 1)

        for p in paramstr.split(b';'):
            if b'=' not in p:
                raise error.InvalidBundleSpecification(
                    _(
                        b'invalid bundle specification: '
                        b'missing "=" in parameter: %s'
                    )
                    % p
                )

            key, value = p.split(b'=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and b'-' not in spec:
        raise error.InvalidBundleSpecification(
            _(
                b'invalid bundle specification; '
                b'must be prefixed with compression: %s'
            )
            % spec
        )

    if b'-' in spec:
        # Full "<compression>-<version>" form.
        compression, version = spec.split(b'-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _(b'%s compression is not supported') % compression
            )

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle version') % version
            )
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = b'v1'
            # Generaldelta repos require v2.
            if b'generaldelta' in repo.requirements:
                version = b'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = b'v2'
        elif spec in _bundlespeccgversions:
            if spec == b'packed1':
                compression = b'none'
            else:
                compression = b'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _(b'%s is not a recognized bundle specification') % spec
            )

    # Bundle version 1 only supports a known set of compression engines.
    if version == b'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _(b'compression engine %s is not supported on v1 bundles')
            % compression
        )

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == b'packed1' and b'requirements' in params:
        requirements = set(params[b'requirements'].split(b','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _(b'missing support for repository features: %s')
                % b', '.join(sorted(missingreqs))
            )

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if b"stream" in params and params[b"stream"] == b"v2":
        variant = _bundlespecvariants[b"streamv2"]
        contentopts.update(variant)

    # Translate to the wire-protocol names via the compression engine.
    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(
        compression, wirecompression, version, wireversion, params, contentopts
    )
248
248
249
249
def readbundle(ui, fh, fname, vfs=None):
    """Sniff the 4-byte header of *fh* and return a matching unbundler.

    Handles three formats: HG10 changegroups (cg1unpacker), HG2x bundle2
    streams (bundle2 unbundler) and HGS1 stream clones
    (streamcloneapplier). A headerless stream (leading NUL byte) is fixed
    up and treated as an uncompressed HG10 bundle. Raises ``error.Abort``
    for anything unrecognized.
    """
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = b"stream"
        # A NUL-prefixed stream without the HG magic is a raw changegroup:
        # push the consumed bytes back and pretend it is uncompressed HG10.
        if header.startswith(b'\0') and not header.startswith(b'HG'):
            fh = changegroup.headerlessfixup(fh, header)
            header = b"HG10"
            alg = b'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic = header[0:2]
    version = header[2:4]

    if magic != b'HG':
        raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)

    if version == b'10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    if version.startswith(b'2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    if version == b'S1':
        return streamclone.streamcloneapplier(fh)

    raise error.Abort(_(b'%s: unknown bundle version %s') % (fname, version))
279
279
280
280
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.

    Returns a bundlespec string such as ``b'gzip-v1'`` or
    ``b'none-v2;stream=v2;...'``. Raises ``error.Abort`` when the bundle
    type, compression or changegroup version cannot be mapped to a known
    spec.
    """

    def speccompression(alg):
        # Map a bundle compression identifier to its bundlespec name;
        # None when the engine is unknown.
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == b'_truncatedBZ':
            alg = b'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
        return b'%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if b'Compression' in b.params:
            alg = b.params[b'Compression']
            comp = speccompression(alg)
            if not comp:
                # Fix: report the offending algorithm name. Previously this
                # formatted ``comp``, which is always None on this path, so
                # the message read "unknown compression algorithm: None".
                raise error.Abort(
                    _(b'unknown compression algorithm: %s') % alg
                )
        else:
            comp = b'none'

        version = None
        for part in b.iterparts():
            if part.type == b'changegroup':
                version = part.params[b'version']
                if version in (b'01', b'02'):
                    version = b'v2'
                else:
                    raise error.Abort(
                        _(
                            b'changegroup version %s does not have '
                            b'a known bundlespec'
                        )
                        % version,
                        hint=_(b'try upgrading your Mercurial client'),
                    )
            elif part.type == b'stream2' and version is None:
                # A stream2 part requires to be part of a v2 bundle
                requirements = urlreq.unquote(part.params[b'requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return b'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(
                _(b'could not identify changegroup version in bundle')
            )

        return b'%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return b'none-packed1;%s' % formatted
    else:
        raise error.Abort(_(b'unknown bundle type: %s') % b)
347
347
348
348
def _computeoutgoing(repo, heads, common):
    """Compute which revs are outgoing for the given common/heads sets.

    Kept as a standalone function so extensions can reuse the logic.

    Returns a discovery.outgoing object.
    """
    changelog = repo.changelog
    if not common:
        filteredcommon = [nullid]
    else:
        # Drop common nodes the local changelog does not actually know.
        known = changelog.hasnode
        filteredcommon = [node for node in common if known(node)]
    if not heads:
        heads = changelog.heads()
    return discovery.outgoing(repo, filteredcommon, heads)
367
367
368
368
def _checkpublish(pushop):
    """Apply the ``experimental.auto-publish`` policy before a push.

    When pushing without ``--publish`` to a remote that advertises itself
    as publishing, and the config is set to ``warn``, ``confirm`` or
    ``abort``, respectively warn about, prompt for, or refuse the
    publication of the affected changesets. Raises ``error.Abort`` for
    the confirm-declined and abort cases; otherwise returns None.
    """
    repo = pushop.repo
    ui = repo.ui
    behavior = ui.config(b'experimental', b'auto-publish')
    if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
        return
    # Only relevant when the remote actually publishes what it receives.
    remotephases = listkeys(pushop.remote, b'phases')
    if not remotephases.get(b'publishing', False):
        return

    if pushop.revs is None:
        # No explicit revs: every served non-public changeset would publish.
        published = repo.filtered(b'served').revs(b'not public()')
    else:
        # Ancestors of the pushed revs that are not yet public.
        published = repo.revs(b'::%ln - public()', pushop.revs)
    if published:
        if behavior == b'warn':
            ui.warn(
                _(b'%i changesets about to be published\n') % len(published)
            )
        elif behavior == b'confirm':
            if ui.promptchoice(
                _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
                % len(published)
            ):
                raise error.Abort(_(b'user quit'))
        elif behavior == b'abort':
            msg = _(b'push would publish %i changesets') % len(published)
            hint = _(
                b"use --publish or adjust 'experimental.auto-publish'"
                b" config"
            )
            raise error.Abort(msg, hint=hint)
401
401
402
402
403 def _forcebundle1(op):
403 def _forcebundle1(op):
404 """return true if a pull/push must use bundle1
404 """return true if a pull/push must use bundle1
405
405
406 This function is used to allow testing of the older bundle version"""
406 This function is used to allow testing of the older bundle version"""
407 ui = op.repo.ui
407 ui = op.repo.ui
408 # The goal is this config is to allow developer to choose the bundle
408 # The goal is this config is to allow developer to choose the bundle
409 # version used during exchanged. This is especially handy during test.
409 # version used during exchanged. This is especially handy during test.
410 # Value is a list of bundle version to be picked from, highest version
410 # Value is a list of bundle version to be picked from, highest version
411 # should be used.
411 # should be used.
412 #
412 #
413 # developer config: devel.legacy.exchange
413 # developer config: devel.legacy.exchange
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 return forcebundle1 or not op.remote.capable(b'bundle2')
416 return forcebundle1 or not op.remote.capable(b'bundle2')
417
417
418
418
class pushoperation(object):
    """An object representing a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(
        self,
        repo,
        remote,
        force=False,
        revs=None,
        newbranch=False,
        bookmarks=(),
        publish=False,
        pushvars=None,
    ):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars
        # publish pushed changesets
        self.publish = publish

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))"
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        rev = self.repo.changelog.index.rev
        cheads = [node for node in self.revs if rev(node) in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set(
            b'%ln and parents(roots(%ln))',
            self.outgoing.commonheads,
            self.outgoing.missing,
        )
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        # cgresult truthy means the changegroup push succeeded (see __init__)
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads
549
549
550
550
# Messages emitted while pushing a bookmark, keyed by action.
# Each value is a (success-template, failure-template) pair.
bookmsgmap = {
    b'update': (
        _(b"updating bookmark %s\n"),
        _(b'updating bookmark %s failed!\n'),
    ),
    b'export': (
        _(b"exporting bookmark %s\n"),
        _(b'exporting bookmark %s failed!\n'),
    ),
    b'delete': (
        _(b"deleting remote bookmark %s\n"),
        _(b'deleting remote bookmark %s failed!\n'),
    ),
}
566
566
567
567
def push(
    repo,
    remote,
    force=False,
    revs=None,
    newbranch=False,
    bookmarks=(),
    publish=False,
    opargs=None,
):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(
        repo,
        remote,
        force,
        revs,
        newbranch,
        bookmarks,
        publish,
        **pycompat.strkwargs(opargs)
    )
    if pushop.remote.local():
        # local-to-local push: the destination must understand every
        # requirement of the source repository
        missing = (
            set(pushop.repo.requirements) - pushop.remote.local().supported
        )
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_(b"destination does not support push"))

    if not pushop.remote.capable(b'unbundle'):
        raise error.Abort(
            _(
                b'cannot push: destination does not support the '
                b'unbundle wire protocol command'
            )
        )

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks
        # requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
        if (
            (not _forcebundle1(pushop))
            and maypushback
            and not bookmod.bookmarksinstore(repo)
        ):
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(
            pushop.repo, b'push-response', pushop.remote.url()
        )
    except error.LockUnavailable as err:
        # The source repo cannot be locked: keep pushing anyway, but
        # disable local phase synchronisation.
        msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
            err
        )
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager():
        with lock or util.nullcontextmanager():
            with pushop.trmanager or util.nullcontextmanager():
                pushop.repo.checkpush(pushop)
                _checkpublish(pushop)
                _pushdiscovery(pushop)
                if not pushop.force:
                    _checksubrepostate(pushop)
                if not _forcebundle1(pushop):
                    _pushbundle2(pushop)
                _pushchangeset(pushop)
                _pushsyncphase(pushop)
                _pushobsolete(pushop)
                _pushbookmark(pushop)

    if repo.ui.configbool(b'experimental', b'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
665
665
666
666
667 # list of steps to perform discovery before push
667 # list of steps to perform discovery before push
668 pushdiscoveryorder = []
668 pushdiscoveryorder = []
669
669
670 # Mapping between step name and function
670 # Mapping between step name and function
671 #
671 #
672 # This exists to help extensions wrap steps if necessary
672 # This exists to help extensions wrap steps if necessary
673 pushdiscoverymapping = {}
673 pushdiscoverymapping = {}
674
674
675
675
676 def pushdiscovery(stepname):
676 def pushdiscovery(stepname):
677 """decorator for function performing discovery before push
677 """decorator for function performing discovery before push
678
678
679 The function is added to the step -> function mapping and appended to the
679 The function is added to the step -> function mapping and appended to the
680 list of steps. Beware that decorated function will be added in order (this
680 list of steps. Beware that decorated function will be added in order (this
681 may matter).
681 may matter).
682
682
683 You can only use this decorator for a new step, if you want to wrap a step
683 You can only use this decorator for a new step, if you want to wrap a step
684 from an extension, change the pushdiscovery dictionary directly."""
684 from an extension, change the pushdiscovery dictionary directly."""
685
685
686 def dec(func):
686 def dec(func):
687 assert stepname not in pushdiscoverymapping
687 assert stepname not in pushdiscoverymapping
688 pushdiscoverymapping[stepname] = func
688 pushdiscoverymapping[stepname] = func
689 pushdiscoveryorder.append(stepname)
689 pushdiscoveryorder.append(stepname)
690 return func
690 return func
691
691
692 return dec
692 return dec
693
693
694
694
695 def _pushdiscovery(pushop):
695 def _pushdiscovery(pushop):
696 """Run all discovery steps"""
696 """Run all discovery steps"""
697 for stepname in pushdiscoveryorder:
697 for stepname in pushdiscoveryorder:
698 step = pushdiscoverymapping[stepname]
698 step = pushdiscoverymapping[stepname]
699 step(pushop)
699 step(pushop)
700
700
701
701
def _checksubrepostate(pushop):
    """Ensure all outgoing referenced subrepo revisions are present locally"""
    for n in pushop.outgoing.missing:
        ctx = pushop.repo[n]

        # only changesets that both declare subrepos (.hgsub) and touch
        # the pinned state (.hgsubstate) need verification
        if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
            for subpath in sorted(ctx.substate):
                ctx.sub(subpath).verify(onpush=True)
711
711
712
712
@pushdiscovery(b'changeset')
def _pushdiscoverychangeset(pushop):
    """discover which changesets need to be pushed"""
    findcommoninc = discovery.findcommonincoming
    if pushop.revs:
        commoninc = findcommoninc(
            pushop.repo,
            pushop.remote,
            force=pushop.force,
            ancestorsof=pushop.revs,
        )
    else:
        commoninc = findcommoninc(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    outgoing = discovery.findcommonoutgoing(
        pushop.repo,
        pushop.remote,
        onlyheads=pushop.revs,
        commoninc=commoninc,
        force=pushop.force,
    )
    # stash discovery results on the push operation for later steps
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc
738
738
739
739
@pushdiscovery(b'phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both the success and the failure case of the changeset
    push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, b'phases')

    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and not pushop.outgoing.missing  # no changesets to be pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # we may be in the issue 3781 case!
        # Drop the courtesy phase synchronisation that would otherwise
        # publish changesets possibly still draft locally.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(
        pushop.repo, pushop.fallbackheads, remotephases
    )
    droots = pushop.remotephases.draftroots

    extracond = b''
    if not pushop.remotephases.publishing:
        extracond = b' and public()'
    revset = b'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not pushop.remotephases.publishing and pushop.publish:
        future = list(
            unfi.set(
                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
            )
        )
    elif not outgoing.missing:
        future = fallback
    else:
        # add the changesets we are about to push as draft
        #
        # should not be necessary for a publishing server, but because of
        # an issue fixed in xxxxx we have to do it anyway.
        fdroots = list(
            unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
        )
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback
801
801
802
802
@pushdiscovery(b'obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    """discover which obsolescence markers must accompany the push"""
    # bail out early unless marker exchange is enabled, we have markers,
    # and the remote advertises obsolescence support
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, which can be quite expensive on a big repo;
    # however, evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
819
819
820
820
@pushdiscovery(b'bookmarks')
def _pushdiscoverybookmarks(pushop):
    """compare local and remote bookmarks and decide what to push"""
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug(b"checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        # restrict bookmark moves to ancestors of the pushed revisions
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))

    # bookmarks explicitly requested on the command line, expanded to
    # their full names
    explicit = {
        repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
    }

    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
840
840
841
841
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decisions on which bookmarks to push to the remote repo

    Exists as a separate function to help extensions alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    # bookmarks that simply advanced locally
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks added locally
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        if bookmod.isdivergent(b):
            pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
            pushop.bkresult = 2
        else:
            pushop.outbookmarks.append((b, b'', scid))
    # bookmarks that would overwrite the remote value
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # bookmarks to delete remotely
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, b''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(
            _(
                b'bookmark %s does not exist on the local '
                b'or remote repository!\n'
            )
            % explicit[0]
        )
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
894
894
895
895
def _pushcheckoutgoing(pushop):
    """validate the outgoing set; return True when there is something to push

    Aborts when a non-forced push would propagate obsolete or unstable
    changesets, and runs the head-count safety checks.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete changesets at all,
        # so skip the iteration entirely
        if unfi.obsstore:
            # these messages live here for 80-char-limit reasons
            mso = _(b"push includes obsolete changeset: %s!")
            mspd = _(b"push includes phase-divergent changeset: %s!")
            mscd = _(b"push includes content-divergent changeset: %s!")
            mst = {
                b"orphan": _(b"push includes orphan changeset: %s!"),
                b"phase-divergent": mspd,
                b"content-divergent": mscd,
            }
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missing heads will be obsolete
            # or unstable, so checking heads only is enough.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

        discovery.checkheads(pushop)
    return True
932
932
933
933
# Names of steps to perform for an outgoing bundle2; order matters.
b2partsgenorder = []

# Mapping between step name and function.
#
# Kept separate from the order list so extensions can wrap individual
# steps if necessary.
b2partsgenmapping = {}


def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2 part generating function

    The decorated function is stored in the step -> function mapping and
    its name inserted into the ordered step list (appended, or at ``idx``
    when given), so registration order matters.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, modify the b2partsgenmapping dictionary directly."""

    def dec(func):
        # each step name may only be registered once
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func

    return dec
963
963
964
964
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate the push-race condition checking parts

    Exists as an independent function to aid extensions.
    """
    # * 'force' skips the push-race check entirely,
    # * if we push nothing, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = b'related' in bundler.capabilities.get(
            b'checkheads', ()
        )
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            # legacy check: the full remote head set must be unchanged
            bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
        else:
            # finer check: only heads our push actually affects
            affected = set()
            for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart(b'check:updated-heads', data=data)
990
990
991
991
992 def _pushing(pushop):
992 def _pushing(pushop):
993 """return True if we are pushing anything"""
993 """return True if we are pushing anything"""
994 return bool(
994 return bool(
995 pushop.outgoing.missing
995 pushop.outgoing.missing
996 or pushop.outdatedphases
996 or pushop.outdatedphases
997 or pushop.outobsmarkers
997 or pushop.outobsmarkers
998 or pushop.outbookmarks
998 or pushop.outbookmarks
999 )
999 )
1000
1000
1001
1001
@b2partsgenerator(b'check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert a part checking that remote bookmarks did not move"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = b'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    # record the old value of every bookmark we are about to move
    data = [(book, old) for book, old, new in pushop.outbookmarks]
    checkdata = bookmod.binaryencode(data)
    bundler.newpart(b'check:bookmarks', data=checkdata)
1016
1016
1017
1017
@b2partsgenerator(b'check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert a part checking that remote phases did not move"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = b'heads' in b2caps.get(b'phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phases have not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart(b'check:phases', data=checkdata)
1035
1035
1036
1036
@b2partsgenerator(b'changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    remotecaps = bundle2.bundle2caps(pushop.remote)
    version = b'01'
    cgversions = remotecaps.get(b'changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # keep only the versions both ends can produce/consume
        supported = changegroup.supportedoutgoingversions(pushop.repo)
        cgversions = [v for v in cgversions if v in supported]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(
        pushop.repo, pushop.outgoing, version, b'push'
    )
    cgpart = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam(b'version', version)
    if b'treemanifest' in pushop.repo.requirements:
        cgpart.addparam(b'treemanifest', b'1')
    if b'exp-sidedata-flag' in pushop.repo.requirements:
        cgpart.addparam(b'exp-sidedata', b'1')

    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies[b'changegroup']) == 1
        pushop.cgresult = cgreplies[b'changegroup'][0][b'return']

    return handlereply
1083
1083
1084
1084
@b2partsgenerator(b'phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if b'phases' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    # prefer the binary phase-heads part unless the devel knob forces the
    # legacy pushkey based exchange
    forcelegacy = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarypart = b'heads' in remotecaps.get(b'phases', ())

    if hasbinarypart and not forcelegacy:
        return _pushb2phaseheads(pushop, bundler)
    if b'pushkey' in remotecaps:
        return _pushb2phasespushkey(pushop, bundler)
1101
1101
1102
1102
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add(b'phases')
    if not pushop.outdatedphases:
        return
    # one bucket per known phase; only the public bucket (index 0) is
    # populated here since we only ever advance heads to public on push
    heads = [[] for p in phases.allphases]
    heads[0].extend(h.node() for h in pushop.outdatedphases)
    bundler.newpart(b'phase-heads', data=phases.binaryencode(heads))
1111
1111
1112
1112
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add(b'phases')
    # (part id, node) pairs so replies/failures can be mapped back
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_(b'updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'phases'))
        part.addparam(b'key', enc(newremotehead.hex()))
        part.addparam(b'old', enc(b'%d' % phases.draft))
        part.addparam(b'new', enc(b'%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _(b'server ignored update of %s to public!\n') % node
            elif not int(results[0][b'return']):
                msg = _(b'updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)

    return handlereply
1148
1148
1149
1149
@b2partsgenerator(b'obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """attach outgoing obsolescence markers as a bundle2 part"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        # no marker format understood by both sides; leave the step open
        # so another exchange mechanism may handle it
        return
    pushop.stepsdone.add(b'obsmarkers')
    if pushop.outobsmarkers:
        markers = obsutil.sortedmarkers(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)
1161
1161
1162
1162
@b2partsgenerator(b'bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if b'bookmarks' in pushop.stepsdone:
        return
    remotecaps = bundle2.bundle2caps(pushop.remote)

    # the binary bookmarks part is preferred unless the devel knob forces
    # the legacy pushkey based exchange
    forcelegacy = b'bookmarks' in pushop.repo.ui.configlist(
        b'devel', b'legacy.exchange'
    )

    if not forcelegacy and b'bookmarks' in remotecaps:
        return _pushb2bookmarkspart(pushop, bundler)
    if b'pushkey' in remotecaps:
        return _pushb2bookmarkspushkey(pushop, bundler)
1177
1177
1178
1178
1179 def _bmaction(old, new):
1179 def _bmaction(old, new):
1180 """small utility for bookmark pushing"""
1180 """small utility for bookmark pushing"""
1181 if not old:
1181 if not old:
1182 return b'export'
1182 return b'export'
1183 elif not new:
1183 elif not new:
1184 return b'delete'
1184 return b'delete'
1185 return b'update'
1185 return b'update'
1186
1186
1187
1187
def _abortonsecretctx(pushop, node, b):
    """abort if a given bookmark points to a secret changeset"""
    if not node:
        # deletion: nothing to check
        return
    if pushop.repo[node].phase() != phases.secret:
        return
    raise error.Abort(
        _(b'cannot push bookmark %s as it points to a secret changeset') % b
    )
1194
1194
1195
1195
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a single binary ``bookmarks`` part"""
    pushop.stepsdone.add(b'bookmarks')
    if not pushop.outbookmarks:
        return

    actions = []
    entries = []
    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        entries.append((book, new))
        actions.append((book, _bmaction(old, new)))
    bundler.newpart(b'bookmarks', data=bookmod.binaryencode(entries))

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in actions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply
1217
1217
1218
1218
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark updates through one legacy pushkey part per bookmark"""
    pushop.stepsdone.add(b'bookmarks')
    # (part id, bookmark, action) triples to map replies/failures back
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        _abortonsecretctx(pushop, new, book)
        part = bundler.newpart(b'pushkey')
        part.addparam(b'namespace', enc(b'bookmarks'))
        part.addparam(b'key', enc(book))
        part.addparam(b'old', enc(hex(old)))
        part.addparam(b'new', enc(hex(new)))
        part2book.append((part.id, book, _bmaction(old, new)))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep[b'pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
            elif int(results[0][b'return']):
                ui.status(bookmsgmap[action][0] % book)
            else:
                ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1

    return handlereply
1265
1265
1266
1266
@b2partsgenerator(b'pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    shellvars = {}
    for raw in pushvars:
        if b'=' not in raw:
            msg = (
                b"unable to parse variable '%s', should follow "
                b"'KEY=VALUE' or 'KEY=' format"
            )
            raise error.Abort(msg % raw)
        # split only on the first '=' so values may themselves contain '='
        k, v = raw.split(b'=', 1)
        shellvars[k] = v

    part = bundler.newpart(b'pushvars')

    for key, value in pycompat.iteritems(shellvars):
        part.addparam(key, value, mandatory=False)
1287
1287
1288
1288
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = pushop.trmanager and pushop.ui.configbool(
        b'experimental', b'bundle2.pushback'
    )

    # advertise what replies we can handle
    capsblob = bundle2.encodecaps(
        bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
    )
    bundler.newpart(b'replycaps', data=capsblob)

    # let every registered part generator contribute; callable returns are
    # reply handlers to invoke once the server has answered
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)

    # do not push if nothing to push (only the replycaps part was added)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand(
                    b'unbundle',
                    {
                        b'bundle': stream,
                        b'heads': [b'force'],
                        b'url': pushop.remote.url(),
                    },
                ).result()
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_(b'remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
            raise error.Abort(_(b'push failed on remote'))
    except error.PushkeyFailed as exc:
        # hand pushkey failures to the part-specific callback when one was
        # registered; otherwise let the exception propagate
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1346
1346
1347
1347
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if b'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable(b'unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (
        outgoing.excluded or pushop.repo.changelog.filteredrevs
    ):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(
            pushop.repo,
            outgoing,
            b'01',
            b'push',
            fastpath=True,
            bundlecaps=bundlecaps,
        )
    else:
        cg = changegroup.makechangegroup(
            pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
        )

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    remoteheads = [b'force'] if pushop.force else pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1394
1394
1395
1395
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, b'phases')
    if (
        pushop.ui.configbool(b'ui', b'_usedassubrepo')
        and remotephases  # server supports phases
        and pushop.cgresult is None  # nothing was pushed
        and remotephases.get(b'publishing', False)
    ):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {b'publishing': b'True'}
    if not remotephases:  # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get(b'publishing', False):
            _localphasemove(pushop, cheads)
        else:  # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if b'phases' in pushop.stepsdone:
                # phases already pushed though bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add(b'phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': b'phases',
                        b'key': newremotehead.hex(),
                        b'old': b'%d' % phases.draft,
                        b'new': b'%d' % phases.public,
                    },
                ).result()

            if not r:
                pushop.ui.warn(
                    _(b'updating %s to public failed!\n') % newremotehead
                )
1460
1460
1461
1461
def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(
            pushop.repo, pushop.trmanager.transaction(), phase, nodes
        )
        return
    # repo is not locked, do not change any phases!
    # Informs the user that phases should have been moved when
    # applicable.
    actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
    phasestr = phases.phasenames[phase]
    if actualmoves:
        pushop.ui.status(
            _(
                b'cannot lock source repo, skipping '
                b'local %s phase update\n'
            )
            % phasestr
        )
1482
1482
1483
1483
def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if b'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add(b'obsmarkers')
    if not pushop.outobsmarkers:
        return
    pushop.ui.debug(b'try to push obsolete markers to remote\n')
    markers = obsutil.sortedmarkers(pushop.outobsmarkers)
    remotedata = obsolete._pushkeyescape(markers)
    outcomes = []
    # reverse sort to ensure we end with dump0
    for key in sorted(remotedata, reverse=True):
        outcomes.append(remote.pushkey(b'obsolete', key, b'', remotedata[key]))
    if not all(outcomes):
        repo.ui.warn(_(b'failed to push some obsolete markers!\n'))
1503
1503
1504
1504
def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = _bmaction(old, new)

        with remote.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': b'bookmarks',
                    b'key': b,
                    b'old': hex(old),
                    b'new': hex(new),
                },
            ).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1
1538
1538
1539
1539
class pulloperation(object):
    """State holder for a single pull operation.

    Carries all pull-related state plus a handful of very common helper
    properties.  Instantiate one at the beginning of each pull and discard
    it afterwards.
    """

    def __init__(
        self,
        repo,
        remote,
        heads=None,
        force=False,
        bookmarks=(),
        remotebookmarks=None,
        streamclonerequested=None,
        includepats=None,
        excludepats=None,
        depth=None,
    ):
        # local repository (pull destination)
        self.repo = repo
        # remote peer (pull source)
        self.remote = remote
        # revisions requested; None means "everything"
        self.heads = heads
        # bookmarks explicitly requested, with aliases expanded
        self.explicitbookmarks = [
            repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
        ]
        # was --force given?
        self.force = force
        # was a streaming clone explicitly requested (None = server default)?
        self.streamclonerequested = streamclonerequested
        # transaction manager (attached later by pull())
        self.trmanager = None
        # changesets common to both sides before the pull
        self.common = None
        # heads pulled from the remote
        self.rheads = None
        # changesets missing locally that must be fetched from the remote
        self.fetch = None
        # bookmark data from the remote, if already known
        self.remotebookmarks = remotebookmarks
        # changegroup result, reused as pull()'s return code
        self.cgresult = None
        # names of steps already performed
        self.stepsdone = set()
        # whether a clone from pre-generated bundles was attempted
        self.clonebundleattempted = False
        # narrow file patterns to include in storage
        self.includepats = includepats
        # narrow file patterns to exclude from storage
        self.excludepats = excludepats
        # how many ancestors of each pulled head to fetch (shallow pulls)
        self.depth = depth

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        if self.heads is not None:
            # an explicit subset was pulled; sync on exactly that subset
            return self.heads
        # everything was pulled: sync on every common changeset plus any
        # remote head that was not already in the common set
        known = set(self.common)
        subset = list(self.common)
        subset.extend(node for node in self.rheads if node not in known)
        return subset

    @util.propertycache
    def canusebundle2(self):
        # bundle2 is usable unless explicitly forced back to bundle1
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        # bundle2 capabilities advertised by the remote peer
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()
1628
1628
1629
1629
class transactionmanager(util.transactional):
    """Lazily create and manage the life cycle of a transaction.

    The underlying transaction is only opened on first use; closing it
    fires the appropriate hooks."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return the open transaction, creating it on first access"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            tr = self.repo.transaction(trname)
            tr.hookargs[b'source'] = self.source
            tr.hookargs[b'url'] = self.url
            self._tr = tr
        return self._tr

    def close(self):
        """commit the transaction, if one was opened"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """abort/release the transaction, if one was opened"""
        if self._tr is not None:
            self._tr.release()
1660
1660
1661
1661
def listkeys(remote, namespace):
    """Return the pushkey listing for ``namespace`` from ``remote``."""
    with remote.commandexecutor() as executor:
        future = executor.callcommand(b'listkeys', {b'namespace': namespace})
        return future.result()
1665
1665
1666
1666
def _fullpullbundle2(repo, pullop):
    """Pull via bundle2, repeating until the server's reply is complete.

    A server may send a partial reply, e.g. when inlining pre-computed
    bundles.  When that happens, fold what was received into the common
    set and pull another bundle.  The process is finished as soon as
    either no changeset was added or every remote head is known locally.
    The head checks go through the unfiltered view because obsolescence
    markers can hide heads.
    """
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def _newheads(h1, h2):
        """Returns heads(h1 % h2)"""
        revs = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in revs}

    def _mergedheads(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        revs = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in revs}

    while True:
        oldheads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if len(unficl) == clstart:
            # nothing was added: the reply was complete
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            # every remote head is now known locally
            break
        added = _newheads(unficl.heads(), oldheads)
        pullop.common = _mergedheads(added, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common
1707
1707
1708
1708
def add_confirm_callback(repo, pullop):
    """Register a finalize callback on the pull transaction that shows
    incoming-change stats to the user and asks for confirmation before
    the transaction is committed.

    The summary callback reports what the transaction would add; the
    prompt validator then aborts the transaction if the user declines.
    """
    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    # weakref so the validator does not keep the repo object alive
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if repo.ui.promptchoice(cm):
            # error.Abort messages are byte strings wrapped in _() in this
            # module; the original passed a native str ("user aborted"),
            # which is inconsistent and not translated.
            raise error.Abort(_(b'user aborted'))

    tr.addvalidator(b'900-pull-prompt', prompt)
1726
1726
1727
1727
def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    opargs = {} if opargs is None else opargs

    # Narrow patterns may be passed in explicitly to give API consumers
    # more flexibility; otherwise fall back to the repository's own ones.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    # refuse to pull from a local peer whose requirements we cannot honor
    localpeer = pullop.remote.local()
    if localpeer:
        missing = set(localpeer.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    # bookmarks living outside the store still need the working-copy lock
    if bookmod.bookmarksinstore(repo):
        wlock = util.nullcontextmanager()
    else:
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        if remote.capable(b'command-changesetdata'):
            # Use the modern wire protocol, if available.
            exchangev2.pull(pullop)
        else:
            # These should ideally live in _pullbundle2(), but they need
            # to run before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop
1838
1838
1839
1839
# Ordered list of discovery step names run before a pull.
pulldiscoveryorder = []

# Mapping from step name to the function implementing it.
#
# Exposed (rather than kept private) so extensions can wrap individual
# steps when necessary.
pulldiscoverymapping = {}
1847
1847
1848
1848
def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The decorated function is recorded in the step -> function mapping and
    its name is appended to the ordered list of steps, so decoration order
    matters.

    Only use this decorator for brand new steps; to wrap an existing step
    from an extension, mutate the pulldiscovery dictionary directly."""

    def register(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return register
1866
1866
1867
1867
def _pulldiscovery(pullop):
    """Run every registered discovery step, in registration order"""
    for stepname in pulldiscoveryorder:
        pulldiscoverymapping[stepname](pullop)
1873
1873
1874
1874
@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    Without bundle2, bookmarks have to be fetched before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        # already known; nothing to fetch
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but lets be nice
        # with new implementations: bookmarks travel in the bundle2 reply.
        return
    raw = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(raw)
1889
1889
1890
1890
@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; eventually all discovery
    should happen here."""
    common, fetch, rheads = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution that catches most of the "common but
        # locally hidden" situations.  Discovery is not performed on the
        # unfiltered repository because that ends up doing a pathological
        # number of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able
        # to detect it.
        scommon = set(common)
        for node in rheads:
            if has_node(node) and node not in scommon:
                common.append(node)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads
1922
1922
1923
1923
1924 def _pullbundle2(pullop):
1924 def _pullbundle2(pullop):
1925 """pull data using bundle2
1925 """pull data using bundle2
1926
1926
1927 For now, the only supported data are changegroup."""
1927 For now, the only supported data are changegroup."""
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1929
1929
1930 # make ui easier to access
1930 # make ui easier to access
1931 ui = pullop.repo.ui
1931 ui = pullop.repo.ui
1932
1932
1933 # At the moment we don't do stream clones over bundle2. If that is
1933 # At the moment we don't do stream clones over bundle2. If that is
1934 # implemented then here's where the check for that will go.
1934 # implemented then here's where the check for that will go.
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1936
1936
1937 # declare pull perimeters
1937 # declare pull perimeters
1938 kwargs[b'common'] = pullop.common
1938 kwargs[b'common'] = pullop.common
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1940
1940
1941 # check server supports narrow and then adding includepats and excludepats
1941 # check server supports narrow and then adding includepats and excludepats
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1943 if servernarrow and pullop.includepats:
1943 if servernarrow and pullop.includepats:
1944 kwargs[b'includepats'] = pullop.includepats
1944 kwargs[b'includepats'] = pullop.includepats
1945 if servernarrow and pullop.excludepats:
1945 if servernarrow and pullop.excludepats:
1946 kwargs[b'excludepats'] = pullop.excludepats
1946 kwargs[b'excludepats'] = pullop.excludepats
1947
1947
1948 if streaming:
1948 if streaming:
1949 kwargs[b'cg'] = False
1949 kwargs[b'cg'] = False
1950 kwargs[b'stream'] = True
1950 kwargs[b'stream'] = True
1951 pullop.stepsdone.add(b'changegroup')
1951 pullop.stepsdone.add(b'changegroup')
1952 pullop.stepsdone.add(b'phases')
1952 pullop.stepsdone.add(b'phases')
1953
1953
1954 else:
1954 else:
1955 # pulling changegroup
1955 # pulling changegroup
1956 pullop.stepsdone.add(b'changegroup')
1956 pullop.stepsdone.add(b'changegroup')
1957
1957
1958 kwargs[b'cg'] = pullop.fetch
1958 kwargs[b'cg'] = pullop.fetch
1959
1959
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1962 if not legacyphase and hasbinaryphase:
1962 if not legacyphase and hasbinaryphase:
1963 kwargs[b'phases'] = True
1963 kwargs[b'phases'] = True
1964 pullop.stepsdone.add(b'phases')
1964 pullop.stepsdone.add(b'phases')
1965
1965
1966 if b'listkeys' in pullop.remotebundle2caps:
1966 if b'listkeys' in pullop.remotebundle2caps:
1967 if b'phases' not in pullop.stepsdone:
1967 if b'phases' not in pullop.stepsdone:
1968 kwargs[b'listkeys'] = [b'phases']
1968 kwargs[b'listkeys'] = [b'phases']
1969
1969
1970 bookmarksrequested = False
1970 bookmarksrequested = False
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1973
1973
1974 if pullop.remotebookmarks is not None:
1974 if pullop.remotebookmarks is not None:
1975 pullop.stepsdone.add(b'request-bookmarks')
1975 pullop.stepsdone.add(b'request-bookmarks')
1976
1976
1977 if (
1977 if (
1978 b'request-bookmarks' not in pullop.stepsdone
1978 b'request-bookmarks' not in pullop.stepsdone
1979 and pullop.remotebookmarks is None
1979 and pullop.remotebookmarks is None
1980 and not legacybookmark
1980 and not legacybookmark
1981 and hasbinarybook
1981 and hasbinarybook
1982 ):
1982 ):
1983 kwargs[b'bookmarks'] = True
1983 kwargs[b'bookmarks'] = True
1984 bookmarksrequested = True
1984 bookmarksrequested = True
1985
1985
1986 if b'listkeys' in pullop.remotebundle2caps:
1986 if b'listkeys' in pullop.remotebundle2caps:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1988 # make sure to always includes bookmark data when migrating
1988 # make sure to always includes bookmark data when migrating
1989 # `hg incoming --bundle` to using this function.
1989 # `hg incoming --bundle` to using this function.
1990 pullop.stepsdone.add(b'request-bookmarks')
1990 pullop.stepsdone.add(b'request-bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1992
1992
1993 # If this is a full pull / clone and the server supports the clone bundles
1993 # If this is a full pull / clone and the server supports the clone bundles
1994 # feature, tell the server whether we attempted a clone bundle. The
1994 # feature, tell the server whether we attempted a clone bundle. The
1995 # presence of this flag indicates the client supports clone bundles. This
1995 # presence of this flag indicates the client supports clone bundles. This
1996 # will enable the server to treat clients that support clone bundles
1996 # will enable the server to treat clients that support clone bundles
1997 # differently from those that don't.
1997 # differently from those that don't.
1998 if (
1998 if (
1999 pullop.remote.capable(b'clonebundles')
1999 pullop.remote.capable(b'clonebundles')
2000 and pullop.heads is None
2000 and pullop.heads is None
2001 and list(pullop.common) == [nullid]
2001 and list(pullop.common) == [nullid]
2002 ):
2002 ):
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2004
2004
2005 if streaming:
2005 if streaming:
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2007 elif not pullop.fetch:
2007 elif not pullop.fetch:
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2009 pullop.cgresult = 0
2009 pullop.cgresult = 0
2010 else:
2010 else:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2015 if obsolete.commonversion(remoteversions) is not None:
2015 if obsolete.commonversion(remoteversions) is not None:
2016 kwargs[b'obsmarkers'] = True
2016 kwargs[b'obsmarkers'] = True
2017 pullop.stepsdone.add(b'obsmarkers')
2017 pullop.stepsdone.add(b'obsmarkers')
2018 _pullbundle2extraprepare(pullop, kwargs)
2018 _pullbundle2extraprepare(pullop, kwargs)
2019
2019
2020 with pullop.remote.commandexecutor() as e:
2020 with pullop.remote.commandexecutor() as e:
2021 args = dict(kwargs)
2021 args = dict(kwargs)
2022 args[b'source'] = b'pull'
2022 args[b'source'] = b'pull'
2023 bundle = e.callcommand(b'getbundle', args).result()
2023 bundle = e.callcommand(b'getbundle', args).result()
2024
2024
2025 try:
2025 try:
2026 op = bundle2.bundleoperation(
2026 op = bundle2.bundleoperation(
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2028 )
2028 )
2029 op.modes[b'bookmarks'] = b'records'
2029 op.modes[b'bookmarks'] = b'records'
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2031 except bundle2.AbortFromPart as exc:
2031 except bundle2.AbortFromPart as exc:
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2034 except error.BundleValueError as exc:
2034 except error.BundleValueError as exc:
2035 raise error.Abort(_(b'missing support for %s') % exc)
2035 raise error.Abort(_(b'missing support for %s') % exc)
2036
2036
2037 if pullop.fetch:
2037 if pullop.fetch:
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2039
2039
2040 # processing phases change
2040 # processing phases change
2041 for namespace, value in op.records[b'listkeys']:
2041 for namespace, value in op.records[b'listkeys']:
2042 if namespace == b'phases':
2042 if namespace == b'phases':
2043 _pullapplyphases(pullop, value)
2043 _pullapplyphases(pullop, value)
2044
2044
2045 # processing bookmark update
2045 # processing bookmark update
2046 if bookmarksrequested:
2046 if bookmarksrequested:
2047 books = {}
2047 books = {}
2048 for record in op.records[b'bookmarks']:
2048 for record in op.records[b'bookmarks']:
2049 books[record[b'bookmark']] = record[b"node"]
2049 books[record[b'bookmark']] = record[b"node"]
2050 pullop.remotebookmarks = books
2050 pullop.remotebookmarks = books
2051 else:
2051 else:
2052 for namespace, value in op.records[b'listkeys']:
2052 for namespace, value in op.records[b'listkeys']:
2053 if namespace == b'bookmarks':
2053 if namespace == b'bookmarks':
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2055
2055
2056 # bookmark data were either already there or pulled in the bundle
2056 # bookmark data were either already there or pulled in the bundle
2057 if pullop.remotebookmarks is not None:
2057 if pullop.remotebookmarks is not None:
2058 _pullbookmarks(pullop)
2058 _pullbookmarks(pullop)
2059
2059
2060
2060
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    # Deliberately a no-op: extensions wrap/replace this function to mutate
    # the ``kwargs`` dict before it is sent as the remote getbundle request.
2063
2063
2064
2064
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo

    Negotiates the most capable wire command the remote supports
    (``getbundle`` > ``changegroup``/``changegroupsubset``), fetches the
    changegroup and applies it inside the pull transaction.  Sets
    ``pullop.cgresult`` with the combined changegroup result.
    """
    # We delay the open of the transaction as late as possible so we
    # don't open transaction for nothing or you break future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        # nothing to transfer; report and record a neutral result
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    # Capability cascade: prefer getbundle, fall back to the legacy
    # changegroup/changegroupsubset commands for old servers.
    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        # partial pull requested (explicit heads) but the old server cannot
        # compute a subset
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2118
2118
2119
2119
def _pullphase(pullop):
    """Fetch phase data from the remote via listkeys and apply it locally."""
    # Nothing to do when a previous step (e.g. a bundle2 part) already
    # handled phases.
    if b'phases' not in pullop.stepsdone:
        _pullapplyphases(pullop, listkeys(pullop.remote, b'phases'))
2126
2126
2127
2127
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state

    ``remotephases`` is the mapping obtained from the remote ``phases``
    listkeys namespace; a true ``publishing`` entry means every common
    changeset is public on the remote.  Advances the local public and
    draft boundaries accordingly, opening the pull transaction only when
    there is actually something to move.
    """
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    # bind hot lookups to locals before the list comprehensions below
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)
2162
2162
2163
2163
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    # Hand the previously-fetched remote bookmark map to the bookmark
    # module, which reconciles it with the local store inside the pull
    # transaction.
    bookmod.updatefromremote(
        pullop.repo.ui,
        pullop.repo,
        pullop.remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )
2179
2179
2180
2180
def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    The `gettransaction` is function that return the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction have been created (when applicable).

    Exists mostly to allow overriding for experimentation purpose"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        # markers are shipped base85-encoded in pushkey entries named
        # "dump0", "dump1", ...; the presence of "dump0" means the remote
        # actually has markers to transfer
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            # obsolescence may affect computed sets (e.g. hidden revs)
            pullop.repo.invalidatevolatilesets()
    return tr
2208
2208
2209
2209
def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    acl_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    acl_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not acl_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    def _aspathpatterns(patterns):
        # '*' grants access to the whole repository
        return [b'path:.' if p == b'*' else b'path:' + p for p in patterns]

    acl_includes = _aspathpatterns(acl_includes)
    acl_excludes = _aspathpatterns(acl_excludes)

    # note: kwargs here uses native-str keys (post strkwargs conversion)
    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, acl_includes, acl_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = dict(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args
2264
2264
2265
2265
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      known: A set of revs that must be kept in the required set alongside
          the heads (unioned into ``required`` below).
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        # distance (in changesets) from the nearest head, lazily extended
        revdepth = {h: 0 for h in headsrevs}

    # rev -> set of ellipsis heads reachable from it without passing a
    # "needed" changeset; rev -> set of roots chosen for that ellipsis head
    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        # move ``roots`` from ``head`` onto the intermediate ``child``
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        # pick an elided merge between two of the three roots and the head
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    # walk from newest to oldest so ellipsis heads propagate to parents
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        # NOTE: ``clrev`` is rebound here from the rev-lookup function to the
        # changelogrevision object; the function is not used past this point.
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                # required-but-elided revs become ellipsis heads themselves
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots
2397
2397
2398
2398
def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    # advertise HG20 plus the repo's bundle2 capability blob, URL-quoted so
    # it survives the bundlecaps transport
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    return {b'HG20', b'bundle2=' + urlreq.quote(capsblob)}
2405
2405
2406
2406
# List of names of steps to perform for a bundle2 for getbundle, order matters.
# Populated via the ``getbundle2partsgenerator`` decorator.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}
2414
2414
2415
2415
def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The decorated function is recorded in the step -> function mapping and
    its step name is appended to the ordered step list (or inserted at
    position ``idx`` when given). Beware that decorated functions will be
    added in order (this may matter).

    You can only use this decorator for new steps, if you want to wrap a step
    from an extension, attack the getbundle2partsmapping dictionary directly."""

    def register(func):
        # a step name may be registered only once
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is not None:
            getbundle2partsorder.insert(idx, stepname)
        else:
            getbundle2partsorder.append(stepname)
        return func

    return register
2436
2436
2437
2437
def bundle2requested(bundlecaps):
    """Tell whether the client advertised a bundle2 (HG2x) capability."""
    if bundlecaps is None:
        return False
    for cap in bundlecaps:
        if cap.startswith(b'HG2'):
            return True
    return False
2442
2442
2443
2443
def getbundlechunks(
    repo, source, heads=None, common=None, bundlecaps=None, **kwargs
):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    # convert native-str kwargs keys to bytes for internal processing
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get(b'cg', True):
            raise ValueError(
                _(b'request for bundle10 must include changegroup')
            )

        # bundle10 only carries a changegroup; any extra argument is an error
        if kwargs:
            raise ValueError(
                _(b'unsupported getbundle arguments: %s')
                % b', '.join(sorted(kwargs.keys()))
            )
        outgoing = _computeoutgoing(repo, heads, common)
        info[b'bundleversion'] = 1
        return (
            info,
            changegroup.makestream(
                repo, outgoing, b'01', source, bundlecaps=bundlecaps
            ),
        )

    # bundle20 case
    info[b'bundleversion'] = 2
    # extract the client's bundle2 capabilities from the URL-quoted
    # "bundle2=" entry of bundlecaps
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith(b'bundle2='):
            blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs[b'heads'] = heads
    kwargs[b'common'] = common

    # run every registered part generator, in registration order
    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(
            bundler,
            repo,
            source,
            bundlecaps=bundlecaps,
            b2caps=b2caps,
            **pycompat.strkwargs(kwargs)
        )

    info[b'prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()
2505
2505
2506
2506
@getbundle2partsgenerator(b'stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    """Add a 'stream2' part carrying streaming-clone data, when requested.

    This is a thin delegator: all decision making (whether the client asked
    for a stream clone, whether the server allows it) happens inside
    ``bundle2.addpartbundlestream2``. Positional extras are accepted only to
    satisfy the common parts-generator signature and are ignored.
    """
    part = bundle2.addpartbundlestream2(bundler, repo, **kwargs)
    return part
2510
2510
2511
2511
@getbundle2partsgenerator(b'changegroup')
def _getbundlechangegrouppart(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """add a changegroup part to the requested bundle"""
    # Nothing to do when the client opted out of changegroup data ('cg'
    # defaults to True) or advertised no bundle2 capabilities at all.
    if not kwargs.get('cg', True) or not b2caps:
        return

    version = b'01'
    cgversions = b2caps.get(b'changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        # Keep only versions both ends understand, then pick the newest.
        cgversions = [
            v
            for v in cgversions
            if v in changegroup.supportedoutgoingversions(repo)
        ]
        if not cgversions:
            raise error.Abort(_(b'no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        # Nothing to send; skip the part entirely.
        return

    if kwargs.get('narrow', False):
        # Narrow clone: restrict the changegroup to the requested patterns.
        include = sorted(filter(bool, kwargs.get('includepats', [])))
        exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
        matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
    else:
        matcher = None

    cgstream = changegroup.makestream(
        repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
    )

    part = bundler.newpart(b'changegroup', data=cgstream)
    if cgversions:
        part.addparam(b'version', version)

    # Advisory changeset count so clients can display progress.
    part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)

    if b'treemanifest' in repo.requirements:
        part.addparam(b'treemanifest', b'1')

    if b'exp-sidedata-flag' in repo.requirements:
        part.addparam(b'exp-sidedata', b'1')

    # NOTE: ``include``/``exclude`` are only bound in the narrow branch
    # above; the leading 'narrow' check guarantees they exist here.
    if (
        kwargs.get('narrow', False)
        and kwargs.get('narrow_acl', False)
        and (include or exclude)
    ):
        # this is mandatory because otherwise ACL clients won't work
        narrowspecpart = bundler.newpart(b'Narrow:responsespec')
        narrowspecpart.data = b'%s\0%s' % (
            b'\n'.join(include),
            b'\n'.join(exclude),
        )
2577
2577
2578
2578
@getbundle2partsgenerator(b'bookmarks')
def _getbundlebookmarkpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a bookmark part to the requested bundle"""
    # Only emit the part when the client explicitly asked for bookmarks.
    wanted = kwargs.get('bookmarks', False)
    if not wanted:
        return
    # The binary bookmarks part requires explicit client support.
    supported = b2caps and b'bookmarks' in b2caps
    if not supported:
        raise error.Abort(_(b'no common bookmarks exchange method'))
    payload = bookmod.binaryencode(bookmod.listbinbookmarks(repo))
    if payload:
        bundler.newpart(b'bookmarks', data=payload)
2592
2592
2593
2593
@getbundle2partsgenerator(b'listkeys')
def _getbundlelistkeysparts(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add parts containing listkeys namespaces to the requested bundle"""
    # Emit one 'listkeys' part per requested pushkey namespace, each
    # carrying the encoded key/value pairs of that namespace.
    for namespace in kwargs.get('listkeys', ()):
        keydata = pushkey.encodekeys(repo.listkeys(namespace).items())
        part = bundler.newpart(b'listkeys')
        part.addparam(b'namespace', namespace)
        part.data = keydata
2605
2605
2606
2606
@getbundle2partsgenerator(b'obsmarkers')
def _getbundleobsmarkerpart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add an obsolescence markers part to the requested bundle"""
    if not kwargs.get('obsmarkers', False):
        return
    if heads is None:
        heads = repo.heads()
    # Collect markers relevant to any ancestor of the exchanged heads,
    # in a stable (sorted) order.
    ancestors = repo.set(b'::%ln', heads)
    nodes = [ctx.node() for ctx in ancestors]
    relevant = obsutil.sortedmarkers(repo.obsstore.relevantmarkers(nodes))
    bundle2.buildobsmarkerspart(bundler, relevant)
2619
2619
2620
2620
@getbundle2partsgenerator(b'phases')
def _getbundlephasespart(
    bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
):
    """add phase heads part to the requested bundle

    Encodes, per phase, the set of heads the client should move to that
    phase, and appends it as a binary 'phase-heads' part.
    """
    if kwargs.get('phases', False):
        # Use an empty default: a bundle2 client that lacks the 'phases'
        # capability entirely used to crash here with ``TypeError:
        # argument of type 'NoneType' is not iterable`` instead of getting
        # the intended Abort.
        if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
            raise error.Abort(_(b'no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            # A publishing server serves everything as public.
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phase for now)
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = b'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart(b'phase-heads', data=phasedata)
2668
2668
2669
2669
@getbundle2partsgenerator(b'hgtagsfnodes')
def _getbundletagsfnodes(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Send only when changesets are being exchanged AND the client
    # advertised support for the part.
    sendingcg = kwargs.get('cg', True)
    if not (b2caps and sendingcg and b'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2696
2696
2697
2697
@getbundle2partsgenerator(b'cache:rev-branch-cache')
def _getbundlerevbranchcache(
    bundler,
    repo,
    source,
    bundlecaps=None,
    b2caps=None,
    heads=None,
    common=None,
    **kwargs
):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Send only when: changesets are being exchanged, the client supports
    # the part, and narrow cloning is not in play (not currently
    # compatible with this cache).
    if not kwargs.get('cg', True):
        return
    if not b2caps or b'rev-branch-cache' not in b2caps:
        return
    if kwargs.get('narrow', False) or repo.ui.has_section(_NARROWACL_SECTION):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2734
2734
2735
2735
def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.

    *their_heads* may be the literal ``[b'force']`` (skip the check), the
    exact current head list, or ``[b'hashed', <sha1 of sorted heads>]``.
    Raises PushRaced when none of those match the repository's current
    state.
    """
    current = repo.heads()
    digest = hashutil.sha1(b''.join(sorted(current))).digest()
    unchanged = (
        their_heads == [b'force']
        or their_heads == current
        or their_heads == [b'hashed', digest]
    )
    if not unchanged:
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced(
            b'repository changed while %s - please try again' % context
        )
2753
2753
2754
2754
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and have
    mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced as PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    # Set later (as a closure) when server output must be replayed to the
    # client; checked in the outer ``finally``.
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool(
        b'experimental', b'bundle2-output-capture'
    )
    if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call wil be a no-op
        check_heads(repo, heads, b'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = b"\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:

                def gettransaction():
                    # Lazily acquire locks and open the transaction; the
                    # wlock (when needed for bookmarks) is taken before the
                    # store lock, then the transaction.
                    if not lockandtr[2]:
                        if not bookmod.bookmarksinstore(repo):
                            lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs[b'source'] = source
                        lockandtr[2].hookargs[b'url'] = url
                        lockandtr[2].hookargs[b'bundle2'] = b'1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool(
                    b'experimental', b'bundle2lazylocking'
                ):
                    gettransaction()

                op = bundle2.bundleoperation(
                    repo,
                    gettransaction,
                    captureoutput=captureoutput,
                    source=b'push',
                )
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)

                        def recordout(output):
                            # Attach captured ui output to the reply bundle.
                            r.newpart(b'output', data=output, mandatory=False)

                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                # Flag the failure so callers know it happened during
                # bundle2 processing, and salvage any reply output already
                # produced so the client still sees it.
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()

                    def recordout(output):
                        part = bundle2.bundlepart(
                            b'output', data=output, mandatory=False
                        )
                        parts.append(part)

                raise
    finally:
        # Release in reverse acquisition order: transaction, lock, wlock.
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
2842
2842
2843
2843
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    # Respect the user/config switch.
    if not repo.ui.configbool(b'ui', b'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    # A pull restricted to specific heads is not a full clone; skip.
    if pullop.heads:
        return

    if not remote.capable(b'clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand(b'clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(
            _(
                b'no clone bundles available on remote; '
                b'falling back to regular clone\n'
            )
        )
        return

    # Drop manifest entries this client cannot apply (wrong bundle spec,
    # missing SNI support, ...).
    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested
    )

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(
            _(
                b'no compatible clone bundles available on server; '
                b'falling back to regular clone\n'
            )
        )
        repo.ui.warn(
            _(b'(you may want to report this to the server operator)\n')
        )
        return

    # Order remaining entries by client preference; the best one is first.
    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0][b'URL']
    repo.ui.status(_(b'applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_(b'finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
        repo.ui.warn(_(b'falling back to normal clone\n'))
    else:
        raise error.Abort(
            _(b'error applying bundle'),
            hint=_(
                b'if this error persists, consider contacting '
                b'the server operator or disable clone '
                b'bundles via '
                b'"--config ui.clonebundles=false"'
            ),
        )
2924
2924
2925
2925
def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    entries = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        # First field is the bundle URL; the rest are KEY=VALUE attributes.
        entry = {b'URL': fields[0]}
        for rawattr in fields[1:]:
            name, rawvalue = rawattr.split(b'=', 1)
            name = urlreq.unquote(name)
            rawvalue = urlreq.unquote(rawvalue)
            entry[name] = rawvalue

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if name == b'BUNDLESPEC':
                try:
                    spec = parsebundlespec(repo, rawvalue)
                    entry[b'COMPRESSION'] = spec.compression
                    entry[b'VERSION'] = spec.version
                except (
                    error.InvalidBundleSpecification,
                    error.UnsupportedBundleSpecification,
                ):
                    # Leave the raw attribute in place; filtering happens
                    # later.
                    pass

        entries.append(entry)

    return entries
2960
2960
2961
2961
def isstreamclonespec(bundlespec):
    """Report whether *bundlespec* describes a stream clone (v1 or v2)."""
    comp = bundlespec.wirecompression
    vers = bundlespec.wireversion

    # Stream clones are always uncompressed on the wire.
    if comp != b'UN':
        return False

    # Stream clone v1
    if vers == b's1':
        return True

    # Stream clone v2
    return bool(vers == b'02' and bundlespec.contentopts.get(b'streamv2'))
2976
2976
2977
2977
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2979 """Remove incompatible clone bundle manifest entries.
2979 """Remove incompatible clone bundle manifest entries.
2980
2980
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2982 and returns a new list consisting of only the entries that this client
2982 and returns a new list consisting of only the entries that this client
2983 should be able to apply.
2983 should be able to apply.
2984
2984
2985 There is no guarantee we'll be able to apply all returned entries because
2985 There is no guarantee we'll be able to apply all returned entries because
2986 the metadata we use to filter on may be missing or wrong.
2986 the metadata we use to filter on may be missing or wrong.
2987 """
2987 """
2988 newentries = []
2988 newentries = []
2989 for entry in entries:
2989 for entry in entries:
2990 spec = entry.get(b'BUNDLESPEC')
2990 spec = entry.get(b'BUNDLESPEC')
2991 if spec:
2991 if spec:
2992 try:
2992 try:
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2994
2994
2995 # If a stream clone was requested, filter out non-streamclone
2995 # If a stream clone was requested, filter out non-streamclone
2996 # entries.
2996 # entries.
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2998 repo.ui.debug(
2998 repo.ui.debug(
2999 b'filtering %s because not a stream clone\n'
2999 b'filtering %s because not a stream clone\n'
3000 % entry[b'URL']
3000 % entry[b'URL']
3001 )
3001 )
3002 continue
3002 continue
3003
3003
3004 except error.InvalidBundleSpecification as e:
3004 except error.InvalidBundleSpecification as e:
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3006 continue
3006 continue
3007 except error.UnsupportedBundleSpecification as e:
3007 except error.UnsupportedBundleSpecification as e:
3008 repo.ui.debug(
3008 repo.ui.debug(
3009 b'filtering %s because unsupported bundle '
3009 b'filtering %s because unsupported bundle '
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3011 )
3011 )
3012 continue
3012 continue
3013 # If we don't have a spec and requested a stream clone, we don't know
3013 # If we don't have a spec and requested a stream clone, we don't know
3014 # what the entry is so don't attempt to apply it.
3014 # what the entry is so don't attempt to apply it.
3015 elif streamclonerequested:
3015 elif streamclonerequested:
3016 repo.ui.debug(
3016 repo.ui.debug(
3017 b'filtering %s because cannot determine if a stream '
3017 b'filtering %s because cannot determine if a stream '
3018 b'clone bundle\n' % entry[b'URL']
3018 b'clone bundle\n' % entry[b'URL']
3019 )
3019 )
3020 continue
3020 continue
3021
3021
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3023 repo.ui.debug(
3023 repo.ui.debug(
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3025 )
3025 )
3026 continue
3026 continue
3027
3027
3028 if b'REQUIREDRAM' in entry:
3029 try:
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3031 except error.ParseError:
3032 repo.ui.debug(
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3034 % entry[b'URL']
3035 )
3036 continue
3037 actualram = repo.ui.estimatememory()
3038 if actualram is not None and actualram * 0.66 < requiredram:
3039 repo.ui.debug(
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3041 % entry[b'URL']
3042 )
3043 continue
3044
3028 newentries.append(entry)
3045 newentries.append(entry)
3029
3046
3030 return newentries
3047 return newentries
3031
3048
3032
3049
3033 class clonebundleentry(object):
3050 class clonebundleentry(object):
3034 """Represents an item in a clone bundles manifest.
3051 """Represents an item in a clone bundles manifest.
3035
3052
3036 This rich class is needed to support sorting since sorted() in Python 3
3053 This rich class is needed to support sorting since sorted() in Python 3
3037 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3038 won't work.
3055 won't work.
3039 """
3056 """
3040
3057
3041 def __init__(self, value, prefers):
3058 def __init__(self, value, prefers):
3042 self.value = value
3059 self.value = value
3043 self.prefers = prefers
3060 self.prefers = prefers
3044
3061
3045 def _cmp(self, other):
3062 def _cmp(self, other):
3046 for prefkey, prefvalue in self.prefers:
3063 for prefkey, prefvalue in self.prefers:
3047 avalue = self.value.get(prefkey)
3064 avalue = self.value.get(prefkey)
3048 bvalue = other.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3049
3066
3050 # Special case for b missing attribute and a matches exactly.
3067 # Special case for b missing attribute and a matches exactly.
3051 if avalue is not None and bvalue is None and avalue == prefvalue:
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3052 return -1
3069 return -1
3053
3070
3054 # Special case for a missing attribute and b matches exactly.
3071 # Special case for a missing attribute and b matches exactly.
3055 if bvalue is not None and avalue is None and bvalue == prefvalue:
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3056 return 1
3073 return 1
3057
3074
3058 # We can't compare unless attribute present on both.
3075 # We can't compare unless attribute present on both.
3059 if avalue is None or bvalue is None:
3076 if avalue is None or bvalue is None:
3060 continue
3077 continue
3061
3078
3062 # Same values should fall back to next attribute.
3079 # Same values should fall back to next attribute.
3063 if avalue == bvalue:
3080 if avalue == bvalue:
3064 continue
3081 continue
3065
3082
3066 # Exact matches come first.
3083 # Exact matches come first.
3067 if avalue == prefvalue:
3084 if avalue == prefvalue:
3068 return -1
3085 return -1
3069 if bvalue == prefvalue:
3086 if bvalue == prefvalue:
3070 return 1
3087 return 1
3071
3088
3072 # Fall back to next attribute.
3089 # Fall back to next attribute.
3073 continue
3090 continue
3074
3091
3075 # If we got here we couldn't sort by attributes and prefers. Fall
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3076 # back to index order.
3093 # back to index order.
3077 return 0
3094 return 0
3078
3095
3079 def __lt__(self, other):
3096 def __lt__(self, other):
3080 return self._cmp(other) < 0
3097 return self._cmp(other) < 0
3081
3098
3082 def __gt__(self, other):
3099 def __gt__(self, other):
3083 return self._cmp(other) > 0
3100 return self._cmp(other) > 0
3084
3101
3085 def __eq__(self, other):
3102 def __eq__(self, other):
3086 return self._cmp(other) == 0
3103 return self._cmp(other) == 0
3087
3104
3088 def __le__(self, other):
3105 def __le__(self, other):
3089 return self._cmp(other) <= 0
3106 return self._cmp(other) <= 0
3090
3107
3091 def __ge__(self, other):
3108 def __ge__(self, other):
3092 return self._cmp(other) >= 0
3109 return self._cmp(other) >= 0
3093
3110
3094 def __ne__(self, other):
3111 def __ne__(self, other):
3095 return self._cmp(other) != 0
3112 return self._cmp(other) != 0
3096
3113
3097
3114
3098 def sortclonebundleentries(ui, entries):
3115 def sortclonebundleentries(ui, entries):
3099 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3100 if not prefers:
3117 if not prefers:
3101 return list(entries)
3118 return list(entries)
3102
3119
3103 def _split(p):
3120 def _split(p):
3104 if b'=' not in p:
3121 if b'=' not in p:
3105 hint = _(b"each comma separated item should be key=value pairs")
3122 hint = _(b"each comma separated item should be key=value pairs")
3106 raise error.Abort(
3123 raise error.Abort(
3107 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3108 )
3125 )
3109 return p.split(b'=', 1)
3126 return p.split(b'=', 1)
3110
3127
3111 prefers = [_split(p) for p in prefers]
3128 prefers = [_split(p) for p in prefers]
3112
3129
3113 items = sorted(clonebundleentry(v, prefers) for v in entries)
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3114 return [i.value for i in items]
3131 return [i.value for i in items]
3115
3132
3116
3133
3117 def trypullbundlefromurl(ui, repo, url):
3134 def trypullbundlefromurl(ui, repo, url):
3118 """Attempt to apply a bundle from a URL."""
3135 """Attempt to apply a bundle from a URL."""
3119 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3120 try:
3137 try:
3121 fh = urlmod.open(ui, url)
3138 fh = urlmod.open(ui, url)
3122 cg = readbundle(ui, fh, b'stream')
3139 cg = readbundle(ui, fh, b'stream')
3123
3140
3124 if isinstance(cg, streamclone.streamcloneapplier):
3141 if isinstance(cg, streamclone.streamcloneapplier):
3125 cg.apply(repo)
3142 cg.apply(repo)
3126 else:
3143 else:
3127 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3128 return True
3145 return True
3129 except urlerr.httperror as e:
3146 except urlerr.httperror as e:
3130 ui.warn(
3147 ui.warn(
3131 _(b'HTTP error fetching bundle: %s\n')
3148 _(b'HTTP error fetching bundle: %s\n')
3132 % stringutil.forcebytestr(e)
3149 % stringutil.forcebytestr(e)
3133 )
3150 )
3134 except urlerr.urlerror as e:
3151 except urlerr.urlerror as e:
3135 ui.warn(
3152 ui.warn(
3136 _(b'error fetching bundle: %s\n')
3153 _(b'error fetching bundle: %s\n')
3137 % stringutil.forcebytestr(e.reason)
3154 % stringutil.forcebytestr(e.reason)
3138 )
3155 )
3139
3156
3140 return False
3157 return False
@@ -1,31 +1,35 b''
1 == New Features ==
1 == New Features ==
2
2
3 * clonebundles can be annotated with the expected memory requirements
4 using the `REQUIREDRAM` option. This allows clients to skip
5 bundles created with large zstd windows and fallback to larger, but
6 less demanding bundles.
3
7
4 == New Experimental Features ==
8 == New Experimental Features ==
5
9
6 * The core of some hg operations have been (and are being)
10 * The core of some hg operations have been (and are being)
7 implemented in rust, for speed. `hg status` on a repository with
11 implemented in rust, for speed. `hg status` on a repository with
8 300k tracked files goes from 1.8s to 0.6s for instance.
12 300k tracked files goes from 1.8s to 0.6s for instance.
9 This has currently been tested only on linux, and does not build on
13 This has currently been tested only on linux, and does not build on
10 windows. See rust/README.rst in the mercurial repository for
14 windows. See rust/README.rst in the mercurial repository for
11 instructions to opt into this.
15 instructions to opt into this.
12
16
13 == Backwards Compatibility Changes ==
17 == Backwards Compatibility Changes ==
14
18
15 * Mercurial now requires at least Python 2.7.9 or a Python version that
19 * Mercurial now requires at least Python 2.7.9 or a Python version that
16 backported modern SSL/TLS features (as defined in PEP 466), and that Python
20 backported modern SSL/TLS features (as defined in PEP 466), and that Python
17 was compiled against a OpenSSL version supporting TLS 1.1 or TLS 1.2
21 was compiled against a OpenSSL version supporting TLS 1.1 or TLS 1.2
18 (likely this requires the OpenSSL version to be at least 1.0.1).
22 (likely this requires the OpenSSL version to be at least 1.0.1).
19
23
20 * The `hg perfwrite` command from contrib/perf.py was made more flexible and
24 * The `hg perfwrite` command from contrib/perf.py was made more flexible and
21 changed its default behavior. To get the previous behavior, run `hg perfwrite
25 changed its default behavior. To get the previous behavior, run `hg perfwrite
22 --nlines=100000 --nitems=1 --item='Testing write performance' --batch-line`.
26 --nlines=100000 --nitems=1 --item='Testing write performance' --batch-line`.
23
27
24
28
25 == Internal API Changes ==
29 == Internal API Changes ==
26
30
27 * logcmdutil.diffordiffstat() now takes contexts instead of nodes.
31 * logcmdutil.diffordiffstat() now takes contexts instead of nodes.
28
32
29 * The `mergestate` class along with some related methods and constants have
33 * The `mergestate` class along with some related methods and constants have
30 moved from `mercurial.merge` to a new `mercurial.mergestate` module.
34 moved from `mercurial.merge` to a new `mercurial.mergestate` module.
31
35
@@ -1,553 +1,638 b''
1 #require no-reposimplestore no-chg
1 #require no-reposimplestore no-chg
2
2
3 Set up a server
3 Set up a server
4
4
5 $ hg init server
5 $ hg init server
6 $ cd server
6 $ cd server
7 $ cat >> .hg/hgrc << EOF
7 $ cat >> .hg/hgrc << EOF
8 > [extensions]
8 > [extensions]
9 > clonebundles =
9 > clonebundles =
10 > EOF
10 > EOF
11
11
12 $ touch foo
12 $ touch foo
13 $ hg -q commit -A -m 'add foo'
13 $ hg -q commit -A -m 'add foo'
14 $ touch bar
14 $ touch bar
15 $ hg -q commit -A -m 'add bar'
15 $ hg -q commit -A -m 'add bar'
16
16
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 $ cat hg.pid >> $DAEMON_PIDS
18 $ cat hg.pid >> $DAEMON_PIDS
19 $ cd ..
19 $ cd ..
20
20
21 Missing manifest should not result in server lookup
21 Missing manifest should not result in server lookup
22
22
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 requesting all changes
24 requesting all changes
25 adding changesets
25 adding changesets
26 adding manifests
26 adding manifests
27 adding file changes
27 adding file changes
28 added 2 changesets with 2 changes to 2 files
28 added 2 changesets with 2 changes to 2 files
29 new changesets 53245c60e682:aaff8d2ffbbf
29 new changesets 53245c60e682:aaff8d2ffbbf
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31
31
32 $ cat server/access.log
32 $ cat server/access.log
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36
36
37 Empty manifest file results in retrieval
37 Empty manifest file results in retrieval
38 (the extension only checks if the manifest file exists)
38 (the extension only checks if the manifest file exists)
39
39
40 $ touch server/.hg/clonebundles.manifest
40 $ touch server/.hg/clonebundles.manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 no clone bundles available on remote; falling back to regular clone
42 no clone bundles available on remote; falling back to regular clone
43 requesting all changes
43 requesting all changes
44 adding changesets
44 adding changesets
45 adding manifests
45 adding manifests
46 adding file changes
46 adding file changes
47 added 2 changesets with 2 changes to 2 files
47 added 2 changesets with 2 changes to 2 files
48 new changesets 53245c60e682:aaff8d2ffbbf
48 new changesets 53245c60e682:aaff8d2ffbbf
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50
50
51 Manifest file with invalid URL aborts
51 Manifest file with invalid URL aborts
52
52
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 $ hg clone http://localhost:$HGPORT 404-url
54 $ hg clone http://localhost:$HGPORT 404-url
55 applying clone bundle from http://does.not.exist/bundle.hg
55 applying clone bundle from http://does.not.exist/bundle.hg
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 abort: error applying bundle
58 abort: error applying bundle
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 [255]
60 [255]
61
61
62 Server is not running aborts
62 Server is not running aborts
63
63
64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
65 $ hg clone http://localhost:$HGPORT server-not-runner
65 $ hg clone http://localhost:$HGPORT server-not-runner
66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
68 abort: error applying bundle
68 abort: error applying bundle
69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
70 [255]
70 [255]
71
71
72 Server returns 404
72 Server returns 404
73
73
74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
75 $ cat http.pid >> $DAEMON_PIDS
75 $ cat http.pid >> $DAEMON_PIDS
76 $ hg clone http://localhost:$HGPORT running-404
76 $ hg clone http://localhost:$HGPORT running-404
77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
78 HTTP error fetching bundle: HTTP Error 404: File not found
78 HTTP error fetching bundle: HTTP Error 404: File not found
79 abort: error applying bundle
79 abort: error applying bundle
80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
81 [255]
81 [255]
82
82
83 We can override failure to fall back to regular clone
83 We can override failure to fall back to regular clone
84
84
85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
87 HTTP error fetching bundle: HTTP Error 404: File not found
87 HTTP error fetching bundle: HTTP Error 404: File not found
88 falling back to normal clone
88 falling back to normal clone
89 requesting all changes
89 requesting all changes
90 adding changesets
90 adding changesets
91 adding manifests
91 adding manifests
92 adding file changes
92 adding file changes
93 added 2 changesets with 2 changes to 2 files
93 added 2 changesets with 2 changes to 2 files
94 new changesets 53245c60e682:aaff8d2ffbbf
94 new changesets 53245c60e682:aaff8d2ffbbf
95
95
96 Bundle with partial content works
96 Bundle with partial content works
97
97
98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
99 1 changesets found
99 1 changesets found
100
100
101 We verify exact bundle content as an extra check against accidental future
101 We verify exact bundle content as an extra check against accidental future
102 changes. If this output changes, we could break old clients.
102 changes. If this output changes, we could break old clients.
103
103
104 $ f --size --hexdump partial.hg
104 $ f --size --hexdump partial.hg
105 partial.hg: size=207
105 partial.hg: size=207
106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
119
119
120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
121 $ hg clone -U http://localhost:$HGPORT partial-bundle
121 $ hg clone -U http://localhost:$HGPORT partial-bundle
122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
123 adding changesets
123 adding changesets
124 adding manifests
124 adding manifests
125 adding file changes
125 adding file changes
126 added 1 changesets with 1 changes to 1 files
126 added 1 changesets with 1 changes to 1 files
127 finished applying clone bundle
127 finished applying clone bundle
128 searching for changes
128 searching for changes
129 adding changesets
129 adding changesets
130 adding manifests
130 adding manifests
131 adding file changes
131 adding file changes
132 added 1 changesets with 1 changes to 1 files
132 added 1 changesets with 1 changes to 1 files
133 new changesets aaff8d2ffbbf
133 new changesets aaff8d2ffbbf
134 1 local changesets published
134 1 local changesets published
135
135
136 Incremental pull doesn't fetch bundle
136 Incremental pull doesn't fetch bundle
137
137
138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
139 adding changesets
139 adding changesets
140 adding manifests
140 adding manifests
141 adding file changes
141 adding file changes
142 added 1 changesets with 1 changes to 1 files
142 added 1 changesets with 1 changes to 1 files
143 new changesets 53245c60e682
143 new changesets 53245c60e682
144
144
145 $ cd partial-clone
145 $ cd partial-clone
146 $ hg pull
146 $ hg pull
147 pulling from http://localhost:$HGPORT/
147 pulling from http://localhost:$HGPORT/
148 searching for changes
148 searching for changes
149 adding changesets
149 adding changesets
150 adding manifests
150 adding manifests
151 adding file changes
151 adding file changes
152 added 1 changesets with 1 changes to 1 files
152 added 1 changesets with 1 changes to 1 files
153 new changesets aaff8d2ffbbf
153 new changesets aaff8d2ffbbf
154 (run 'hg update' to get a working copy)
154 (run 'hg update' to get a working copy)
155 $ cd ..
155 $ cd ..
156
156
157 Bundle with full content works
157 Bundle with full content works
158
158
159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
160 2 changesets found
160 2 changesets found
161
161
162 Again, we perform an extra check against bundle content changes. If this content
162 Again, we perform an extra check against bundle content changes. If this content
163 changes, clone bundles produced by new Mercurial versions may not be readable
163 changes, clone bundles produced by new Mercurial versions may not be readable
164 by old clients.
164 by old clients.
165
165
166 $ f --size --hexdump full.hg
166 $ f --size --hexdump full.hg
167 full.hg: size=442
167 full.hg: size=442
168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
196
196
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
198 $ hg clone -U http://localhost:$HGPORT full-bundle
198 $ hg clone -U http://localhost:$HGPORT full-bundle
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
199 applying clone bundle from http://localhost:$HGPORT1/full.hg
200 adding changesets
200 adding changesets
201 adding manifests
201 adding manifests
202 adding file changes
202 adding file changes
203 added 2 changesets with 2 changes to 2 files
203 added 2 changesets with 2 changes to 2 files
204 finished applying clone bundle
204 finished applying clone bundle
205 searching for changes
205 searching for changes
206 no changes found
206 no changes found
207 2 local changesets published
207 2 local changesets published
208
208
209 Feature works over SSH
209 Feature works over SSH
210
210
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 adding changesets
213 adding changesets
214 adding manifests
214 adding manifests
215 adding file changes
215 adding file changes
216 added 2 changesets with 2 changes to 2 files
216 added 2 changesets with 2 changes to 2 files
217 finished applying clone bundle
217 finished applying clone bundle
218 searching for changes
218 searching for changes
219 no changes found
219 no changes found
220 2 local changesets published
220 2 local changesets published
221
221
222 Entry with unknown BUNDLESPEC is filtered and not used
222 Entry with unknown BUNDLESPEC is filtered and not used
223
223
224 $ cat > server/.hg/clonebundles.manifest << EOF
224 $ cat > server/.hg/clonebundles.manifest << EOF
225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
226 > http://bad.entry2 BUNDLESPEC=xz-v1
226 > http://bad.entry2 BUNDLESPEC=xz-v1
227 > http://bad.entry3 BUNDLESPEC=none-v100
227 > http://bad.entry3 BUNDLESPEC=none-v100
228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
229 > EOF
229 > EOF
230
230
231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
232 applying clone bundle from http://localhost:$HGPORT1/full.hg
232 applying clone bundle from http://localhost:$HGPORT1/full.hg
233 adding changesets
233 adding changesets
234 adding manifests
234 adding manifests
235 adding file changes
235 adding file changes
236 added 2 changesets with 2 changes to 2 files
236 added 2 changesets with 2 changes to 2 files
237 finished applying clone bundle
237 finished applying clone bundle
238 searching for changes
238 searching for changes
239 no changes found
239 no changes found
240 2 local changesets published
240 2 local changesets published
241
241
242 Automatic fallback when all entries are filtered
242 Automatic fallback when all entries are filtered
243
243
244 $ cat > server/.hg/clonebundles.manifest << EOF
244 $ cat > server/.hg/clonebundles.manifest << EOF
245 > http://bad.entry BUNDLESPEC=UNKNOWN
245 > http://bad.entry BUNDLESPEC=UNKNOWN
246 > EOF
246 > EOF
247
247
248 $ hg clone -U http://localhost:$HGPORT filter-all
248 $ hg clone -U http://localhost:$HGPORT filter-all
249 no compatible clone bundles available on server; falling back to regular clone
249 no compatible clone bundles available on server; falling back to regular clone
250 (you may want to report this to the server operator)
250 (you may want to report this to the server operator)
251 requesting all changes
251 requesting all changes
252 adding changesets
252 adding changesets
253 adding manifests
253 adding manifests
254 adding file changes
254 adding file changes
255 added 2 changesets with 2 changes to 2 files
255 added 2 changesets with 2 changes to 2 files
256 new changesets 53245c60e682:aaff8d2ffbbf
256 new changesets 53245c60e682:aaff8d2ffbbf
257
257
258 We require a Python version that supports SNI. Therefore, URLs requiring SNI
258 We require a Python version that supports SNI. Therefore, URLs requiring SNI
259 are not filtered.
259 are not filtered.
260
260
261 $ cp full.hg sni.hg
261 $ cp full.hg sni.hg
262 $ cat > server/.hg/clonebundles.manifest << EOF
262 $ cat > server/.hg/clonebundles.manifest << EOF
263 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
263 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
264 > http://localhost:$HGPORT1/full.hg
264 > http://localhost:$HGPORT1/full.hg
265 > EOF
265 > EOF
266
266
267 $ hg clone -U http://localhost:$HGPORT sni-supported
267 $ hg clone -U http://localhost:$HGPORT sni-supported
268 applying clone bundle from http://localhost:$HGPORT1/sni.hg
268 applying clone bundle from http://localhost:$HGPORT1/sni.hg
269 adding changesets
269 adding changesets
270 adding manifests
270 adding manifests
271 adding file changes
271 adding file changes
272 added 2 changesets with 2 changes to 2 files
272 added 2 changesets with 2 changes to 2 files
273 finished applying clone bundle
273 finished applying clone bundle
274 searching for changes
274 searching for changes
275 no changes found
275 no changes found
276 2 local changesets published
276 2 local changesets published
277
277
278 Stream clone bundles are supported
278 Stream clone bundles are supported
279
279
280 $ hg -R server debugcreatestreamclonebundle packed.hg
280 $ hg -R server debugcreatestreamclonebundle packed.hg
281 writing 613 bytes for 4 files
281 writing 613 bytes for 4 files
282 bundle requirements: generaldelta, revlogv1, sparserevlog
282 bundle requirements: generaldelta, revlogv1, sparserevlog
283
283
284 No bundle spec should work
284 No bundle spec should work
285
285
286 $ cat > server/.hg/clonebundles.manifest << EOF
286 $ cat > server/.hg/clonebundles.manifest << EOF
287 > http://localhost:$HGPORT1/packed.hg
287 > http://localhost:$HGPORT1/packed.hg
288 > EOF
288 > EOF
289
289
290 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
290 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
291 applying clone bundle from http://localhost:$HGPORT1/packed.hg
291 applying clone bundle from http://localhost:$HGPORT1/packed.hg
292 4 files to transfer, 613 bytes of data
292 4 files to transfer, 613 bytes of data
293 transferred 613 bytes in *.* seconds (*) (glob)
293 transferred 613 bytes in *.* seconds (*) (glob)
294 finished applying clone bundle
294 finished applying clone bundle
295 searching for changes
295 searching for changes
296 no changes found
296 no changes found
297
297
298 Bundle spec without parameters should work
298 Bundle spec without parameters should work
299
299
300 $ cat > server/.hg/clonebundles.manifest << EOF
300 $ cat > server/.hg/clonebundles.manifest << EOF
301 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
301 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
302 > EOF
302 > EOF
303
303
304 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
304 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
305 applying clone bundle from http://localhost:$HGPORT1/packed.hg
305 applying clone bundle from http://localhost:$HGPORT1/packed.hg
306 4 files to transfer, 613 bytes of data
306 4 files to transfer, 613 bytes of data
307 transferred 613 bytes in *.* seconds (*) (glob)
307 transferred 613 bytes in *.* seconds (*) (glob)
308 finished applying clone bundle
308 finished applying clone bundle
309 searching for changes
309 searching for changes
310 no changes found
310 no changes found
311
311
312 Bundle spec with format requirements should work
312 Bundle spec with format requirements should work
313
313
314 $ cat > server/.hg/clonebundles.manifest << EOF
314 $ cat > server/.hg/clonebundles.manifest << EOF
315 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
315 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
316 > EOF
316 > EOF
317
317
318 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
318 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
319 applying clone bundle from http://localhost:$HGPORT1/packed.hg
319 applying clone bundle from http://localhost:$HGPORT1/packed.hg
320 4 files to transfer, 613 bytes of data
320 4 files to transfer, 613 bytes of data
321 transferred 613 bytes in *.* seconds (*) (glob)
321 transferred 613 bytes in *.* seconds (*) (glob)
322 finished applying clone bundle
322 finished applying clone bundle
323 searching for changes
323 searching for changes
324 no changes found
324 no changes found
325
325
326 Stream bundle spec with unknown requirements should be filtered out
326 Stream bundle spec with unknown requirements should be filtered out
327
327
328 $ cat > server/.hg/clonebundles.manifest << EOF
328 $ cat > server/.hg/clonebundles.manifest << EOF
329 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
329 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
330 > EOF
330 > EOF
331
331
332 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
332 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
333 no compatible clone bundles available on server; falling back to regular clone
333 no compatible clone bundles available on server; falling back to regular clone
334 (you may want to report this to the server operator)
334 (you may want to report this to the server operator)
335 requesting all changes
335 requesting all changes
336 adding changesets
336 adding changesets
337 adding manifests
337 adding manifests
338 adding file changes
338 adding file changes
339 added 2 changesets with 2 changes to 2 files
339 added 2 changesets with 2 changes to 2 files
340 new changesets 53245c60e682:aaff8d2ffbbf
340 new changesets 53245c60e682:aaff8d2ffbbf
341
341
342 Set up manifest for testing preferences
342 Set up manifest for testing preferences
343 (Remember, the TYPE does not have to match reality - the URL is
343 (Remember, the TYPE does not have to match reality - the URL is
344 important)
344 important)
345
345
346 $ cp full.hg gz-a.hg
346 $ cp full.hg gz-a.hg
347 $ cp full.hg gz-b.hg
347 $ cp full.hg gz-b.hg
348 $ cp full.hg bz2-a.hg
348 $ cp full.hg bz2-a.hg
349 $ cp full.hg bz2-b.hg
349 $ cp full.hg bz2-b.hg
350 $ cat > server/.hg/clonebundles.manifest << EOF
350 $ cat > server/.hg/clonebundles.manifest << EOF
351 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
351 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
352 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
352 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
353 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
353 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
354 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
354 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
355 > EOF
355 > EOF
356
356
357 Preferring an undefined attribute will take first entry
357 Preferring an undefined attribute will take first entry
358
358
359 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
359 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
360 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
360 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
361 adding changesets
361 adding changesets
362 adding manifests
362 adding manifests
363 adding file changes
363 adding file changes
364 added 2 changesets with 2 changes to 2 files
364 added 2 changesets with 2 changes to 2 files
365 finished applying clone bundle
365 finished applying clone bundle
366 searching for changes
366 searching for changes
367 no changes found
367 no changes found
368 2 local changesets published
368 2 local changesets published
369
369
370 Preferring bz2 type will download first entry of that type
370 Preferring bz2 type will download first entry of that type
371
371
372 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
372 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
373 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
373 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
374 adding changesets
374 adding changesets
375 adding manifests
375 adding manifests
376 adding file changes
376 adding file changes
377 added 2 changesets with 2 changes to 2 files
377 added 2 changesets with 2 changes to 2 files
378 finished applying clone bundle
378 finished applying clone bundle
379 searching for changes
379 searching for changes
380 no changes found
380 no changes found
381 2 local changesets published
381 2 local changesets published
382
382
383 Preferring multiple values of an option works
383 Preferring multiple values of an option works
384
384
385 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
385 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
386 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
386 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
387 adding changesets
387 adding changesets
388 adding manifests
388 adding manifests
389 adding file changes
389 adding file changes
390 added 2 changesets with 2 changes to 2 files
390 added 2 changesets with 2 changes to 2 files
391 finished applying clone bundle
391 finished applying clone bundle
392 searching for changes
392 searching for changes
393 no changes found
393 no changes found
394 2 local changesets published
394 2 local changesets published
395
395
396 Sorting multiple values should get us back to original first entry
396 Sorting multiple values should get us back to original first entry
397
397
398 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
398 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
399 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
399 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
400 adding changesets
400 adding changesets
401 adding manifests
401 adding manifests
402 adding file changes
402 adding file changes
403 added 2 changesets with 2 changes to 2 files
403 added 2 changesets with 2 changes to 2 files
404 finished applying clone bundle
404 finished applying clone bundle
405 searching for changes
405 searching for changes
406 no changes found
406 no changes found
407 2 local changesets published
407 2 local changesets published
408
408
409 Preferring multiple attributes has correct order
409 Preferring multiple attributes has correct order
410
410
411 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
411 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
412 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
412 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
413 adding changesets
413 adding changesets
414 adding manifests
414 adding manifests
415 adding file changes
415 adding file changes
416 added 2 changesets with 2 changes to 2 files
416 added 2 changesets with 2 changes to 2 files
417 finished applying clone bundle
417 finished applying clone bundle
418 searching for changes
418 searching for changes
419 no changes found
419 no changes found
420 2 local changesets published
420 2 local changesets published
421
421
422 Test where attribute is missing from some entries
422 Test where attribute is missing from some entries
423
423
424 $ cat > server/.hg/clonebundles.manifest << EOF
424 $ cat > server/.hg/clonebundles.manifest << EOF
425 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
425 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
426 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
426 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
427 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
427 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
428 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
428 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
429 > EOF
429 > EOF
430
430
431 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
431 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
432 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
432 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
433 adding changesets
433 adding changesets
434 adding manifests
434 adding manifests
435 adding file changes
435 adding file changes
436 added 2 changesets with 2 changes to 2 files
436 added 2 changesets with 2 changes to 2 files
437 finished applying clone bundle
437 finished applying clone bundle
438 searching for changes
438 searching for changes
439 no changes found
439 no changes found
440 2 local changesets published
440 2 local changesets published
441
441
442 Test a bad attribute list
442 Test a bad attribute list
443
443
444 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
444 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
445 abort: invalid ui.clonebundleprefers item: bad
445 abort: invalid ui.clonebundleprefers item: bad
446 (each comma separated item should be key=value pairs)
446 (each comma separated item should be key=value pairs)
447 [255]
447 [255]
448 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
448 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
449 > -U http://localhost:$HGPORT bad-input
449 > -U http://localhost:$HGPORT bad-input
450 abort: invalid ui.clonebundleprefers item: bad
450 abort: invalid ui.clonebundleprefers item: bad
451 (each comma separated item should be key=value pairs)
451 (each comma separated item should be key=value pairs)
452 [255]
452 [255]
453
453
454
454
455 Test interaction between clone bundles and --stream
455 Test interaction between clone bundles and --stream
456
456
457 A manifest with just a gzip bundle
457 A manifest with just a gzip bundle
458
458
459 $ cat > server/.hg/clonebundles.manifest << EOF
459 $ cat > server/.hg/clonebundles.manifest << EOF
460 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
460 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
461 > EOF
461 > EOF
462
462
463 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
463 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
464 no compatible clone bundles available on server; falling back to regular clone
464 no compatible clone bundles available on server; falling back to regular clone
465 (you may want to report this to the server operator)
465 (you may want to report this to the server operator)
466 streaming all changes
466 streaming all changes
467 9 files to transfer, 816 bytes of data
467 9 files to transfer, 816 bytes of data
468 transferred 816 bytes in * seconds (*) (glob)
468 transferred 816 bytes in * seconds (*) (glob)
469
469
470 A manifest with a stream clone but no BUNDLESPEC
470 A manifest with a stream clone but no BUNDLESPEC
471
471
472 $ cat > server/.hg/clonebundles.manifest << EOF
472 $ cat > server/.hg/clonebundles.manifest << EOF
473 > http://localhost:$HGPORT1/packed.hg
473 > http://localhost:$HGPORT1/packed.hg
474 > EOF
474 > EOF
475
475
476 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
476 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
477 no compatible clone bundles available on server; falling back to regular clone
477 no compatible clone bundles available on server; falling back to regular clone
478 (you may want to report this to the server operator)
478 (you may want to report this to the server operator)
479 streaming all changes
479 streaming all changes
480 9 files to transfer, 816 bytes of data
480 9 files to transfer, 816 bytes of data
481 transferred 816 bytes in * seconds (*) (glob)
481 transferred 816 bytes in * seconds (*) (glob)
482
482
483 A manifest with a gzip bundle and a stream clone
483 A manifest with a gzip bundle and a stream clone
484
484
485 $ cat > server/.hg/clonebundles.manifest << EOF
485 $ cat > server/.hg/clonebundles.manifest << EOF
486 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
486 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
487 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
487 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
488 > EOF
488 > EOF
489
489
490 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
490 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
491 applying clone bundle from http://localhost:$HGPORT1/packed.hg
491 applying clone bundle from http://localhost:$HGPORT1/packed.hg
492 4 files to transfer, 613 bytes of data
492 4 files to transfer, 613 bytes of data
493 transferred 613 bytes in * seconds (*) (glob)
493 transferred 613 bytes in * seconds (*) (glob)
494 finished applying clone bundle
494 finished applying clone bundle
495 searching for changes
495 searching for changes
496 no changes found
496 no changes found
497
497
498 A manifest with a gzip bundle and stream clone with supported requirements
498 A manifest with a gzip bundle and stream clone with supported requirements
499
499
500 $ cat > server/.hg/clonebundles.manifest << EOF
500 $ cat > server/.hg/clonebundles.manifest << EOF
501 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
501 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
502 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
502 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
503 > EOF
503 > EOF
504
504
505 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
505 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
506 applying clone bundle from http://localhost:$HGPORT1/packed.hg
506 applying clone bundle from http://localhost:$HGPORT1/packed.hg
507 4 files to transfer, 613 bytes of data
507 4 files to transfer, 613 bytes of data
508 transferred 613 bytes in * seconds (*) (glob)
508 transferred 613 bytes in * seconds (*) (glob)
509 finished applying clone bundle
509 finished applying clone bundle
510 searching for changes
510 searching for changes
511 no changes found
511 no changes found
512
512
513 A manifest with a gzip bundle and a stream clone with unsupported requirements
513 A manifest with a gzip bundle and a stream clone with unsupported requirements
514
514
515 $ cat > server/.hg/clonebundles.manifest << EOF
515 $ cat > server/.hg/clonebundles.manifest << EOF
516 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
516 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
517 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
517 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
518 > EOF
518 > EOF
519
519
520 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
520 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
521 no compatible clone bundles available on server; falling back to regular clone
521 no compatible clone bundles available on server; falling back to regular clone
522 (you may want to report this to the server operator)
522 (you may want to report this to the server operator)
523 streaming all changes
523 streaming all changes
524 9 files to transfer, 816 bytes of data
524 9 files to transfer, 816 bytes of data
525 transferred 816 bytes in * seconds (*) (glob)
525 transferred 816 bytes in * seconds (*) (glob)
526
526
527 Test clone bundle retrieved through bundle2
527 Test clone bundle retrieved through bundle2
528
528
529 $ cat << EOF >> $HGRCPATH
529 $ cat << EOF >> $HGRCPATH
530 > [extensions]
530 > [extensions]
531 > largefiles=
531 > largefiles=
532 > EOF
532 > EOF
533 $ killdaemons.py
533 $ killdaemons.py
534 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
534 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
535 $ cat hg.pid >> $DAEMON_PIDS
535 $ cat hg.pid >> $DAEMON_PIDS
536
536
537 $ hg -R server debuglfput gz-a.hg
537 $ hg -R server debuglfput gz-a.hg
538 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
538 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
539
539
540 $ cat > server/.hg/clonebundles.manifest << EOF
540 $ cat > server/.hg/clonebundles.manifest << EOF
541 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
541 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
542 > EOF
542 > EOF
543
543
544 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
544 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
545 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
545 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
546 adding changesets
546 adding changesets
547 adding manifests
547 adding manifests
548 adding file changes
548 adding file changes
549 added 2 changesets with 2 changes to 2 files
549 added 2 changesets with 2 changes to 2 files
550 finished applying clone bundle
550 finished applying clone bundle
551 searching for changes
551 searching for changes
552 no changes found
552 no changes found
553 2 local changesets published
553 2 local changesets published
554 $ killdaemons.py
555
556 A manifest with a gzip bundle requiring too much memory for a 16MB system and working
557 on a 32MB system.
558
559 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
560 $ cat http.pid >> $DAEMON_PIDS
561 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
562 $ cat hg.pid >> $DAEMON_PIDS
563
564 $ cat > server/.hg/clonebundles.manifest << EOF
565 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
566 > EOF
567
568 $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
569 using http://localhost:$HGPORT/
570 sending capabilities command
571 sending clonebundles command
572 filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
573 no compatible clone bundles available on server; falling back to regular clone
574 (you may want to report this to the server operator)
575 query 1; heads
576 sending batch command
577 requesting all changes
578 sending getbundle command
579 bundle2-input-bundle: with-transaction
580 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
581 adding changesets
582 add changeset 53245c60e682
583 add changeset aaff8d2ffbbf
584 adding manifests
585 adding file changes
586 adding bar revisions
587 adding foo revisions
588 bundle2-input-part: total payload size 920
589 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
590 bundle2-input-part: "phase-heads" supported
591 bundle2-input-part: total payload size 24
592 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
593 bundle2-input-part: total payload size 59
594 bundle2-input-bundle: 4 parts total
595 checking for updated bookmarks
596 updating the branch cache
597 added 2 changesets with 2 changes to 2 files
598 new changesets 53245c60e682:aaff8d2ffbbf
599 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
600 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
601
602 $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
603 using http://localhost:$HGPORT/
604 sending capabilities command
605 sending clonebundles command
606 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
607 bundle2-input-bundle: 1 params with-transaction
608 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
609 adding changesets
610 add changeset 53245c60e682
611 add changeset aaff8d2ffbbf
612 adding manifests
613 adding file changes
614 adding bar revisions
615 adding foo revisions
616 bundle2-input-part: total payload size 920
617 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
618 bundle2-input-part: total payload size 59
619 bundle2-input-bundle: 2 parts total
620 updating the branch cache
621 added 2 changesets with 2 changes to 2 files
622 finished applying clone bundle
623 query 1; heads
624 sending batch command
625 searching for changes
626 all remote heads known locally
627 no changes found
628 sending getbundle command
629 bundle2-input-bundle: with-transaction
630 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
631 bundle2-input-part: "phase-heads" supported
632 bundle2-input-part: total payload size 24
633 bundle2-input-bundle: 2 parts total
634 checking for updated bookmarks
635 2 local changesets published
636 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
637 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
638 $ killdaemons.py
General Comments 0
You need to be logged in to leave comments. Login now